##// END OF EJS Templates
perf: add a new `perfmergecopies` command...
marmoute -
r42576:f5f0a949 default
parent child Browse files
Show More
@@ -1,2926 +1,2944 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
121 def identity(a):
121 def identity(a):
122 return a
122 return a
123
123
124 try:
124 try:
125 from mercurial import pycompat
125 from mercurial import pycompat
126 getargspec = pycompat.getargspec # added to module after 4.5
126 getargspec = pycompat.getargspec # added to module after 4.5
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 if pycompat.ispy3:
131 if pycompat.ispy3:
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 else:
133 else:
134 _maxint = sys.maxint
134 _maxint = sys.maxint
135 except (ImportError, AttributeError):
135 except (ImportError, AttributeError):
136 import inspect
136 import inspect
137 getargspec = inspect.getargspec
137 getargspec = inspect.getargspec
138 _byteskwargs = identity
138 _byteskwargs = identity
139 fsencode = identity # no py3 support
139 fsencode = identity # no py3 support
140 _maxint = sys.maxint # no py3 support
140 _maxint = sys.maxint # no py3 support
141 _sysstr = lambda x: x # no py3 support
141 _sysstr = lambda x: x # no py3 support
142 _xrange = xrange
142 _xrange = xrange
143
143
144 try:
144 try:
145 # 4.7+
145 # 4.7+
146 queue = pycompat.queue.Queue
146 queue = pycompat.queue.Queue
147 except (AttributeError, ImportError):
147 except (AttributeError, ImportError):
148 # <4.7.
148 # <4.7.
149 try:
149 try:
150 queue = pycompat.queue
150 queue = pycompat.queue
151 except (AttributeError, ImportError):
151 except (AttributeError, ImportError):
152 queue = util.queue
152 queue = util.queue
153
153
154 try:
154 try:
155 from mercurial import logcmdutil
155 from mercurial import logcmdutil
156 makelogtemplater = logcmdutil.maketemplater
156 makelogtemplater = logcmdutil.maketemplater
157 except (AttributeError, ImportError):
157 except (AttributeError, ImportError):
158 try:
158 try:
159 makelogtemplater = cmdutil.makelogtemplater
159 makelogtemplater = cmdutil.makelogtemplater
160 except (AttributeError, ImportError):
160 except (AttributeError, ImportError):
161 makelogtemplater = None
161 makelogtemplater = None
162
162
163 # for "historical portability":
163 # for "historical portability":
164 # define util.safehasattr forcibly, because util.safehasattr has been
164 # define util.safehasattr forcibly, because util.safehasattr has been
165 # available since 1.9.3 (or 94b200a11cf7)
165 # available since 1.9.3 (or 94b200a11cf7)
166 _undefined = object()
166 _undefined = object()
167 def safehasattr(thing, attr):
167 def safehasattr(thing, attr):
168 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
168 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
169 setattr(util, 'safehasattr', safehasattr)
169 setattr(util, 'safehasattr', safehasattr)
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.timer forcibly, because util.timer has been available
172 # define util.timer forcibly, because util.timer has been available
173 # since ae5d60bb70c9
173 # since ae5d60bb70c9
174 if safehasattr(time, 'perf_counter'):
174 if safehasattr(time, 'perf_counter'):
175 util.timer = time.perf_counter
175 util.timer = time.perf_counter
176 elif os.name == b'nt':
176 elif os.name == b'nt':
177 util.timer = time.clock
177 util.timer = time.clock
178 else:
178 else:
179 util.timer = time.time
179 util.timer = time.time
180
180
181 # for "historical portability":
181 # for "historical portability":
182 # use locally defined empty option list, if formatteropts isn't
182 # use locally defined empty option list, if formatteropts isn't
183 # available, because commands.formatteropts has been available since
183 # available, because commands.formatteropts has been available since
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 # available since 2.2 (or ae5f92e154d3)
185 # available since 2.2 (or ae5f92e154d3)
186 formatteropts = getattr(cmdutil, "formatteropts",
186 formatteropts = getattr(cmdutil, "formatteropts",
187 getattr(commands, "formatteropts", []))
187 getattr(commands, "formatteropts", []))
188
188
189 # for "historical portability":
189 # for "historical portability":
190 # use locally defined option list, if debugrevlogopts isn't available,
190 # use locally defined option list, if debugrevlogopts isn't available,
191 # because commands.debugrevlogopts has been available since 3.7 (or
191 # because commands.debugrevlogopts has been available since 3.7 (or
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 # since 1.9 (or a79fea6b3e77).
193 # since 1.9 (or a79fea6b3e77).
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 getattr(commands, "debugrevlogopts", [
195 getattr(commands, "debugrevlogopts", [
196 (b'c', b'changelog', False, (b'open changelog')),
196 (b'c', b'changelog', False, (b'open changelog')),
197 (b'm', b'manifest', False, (b'open manifest')),
197 (b'm', b'manifest', False, (b'open manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
199 ]))
199 ]))
200
200
201 cmdtable = {}
201 cmdtable = {}
202
202
203 # for "historical portability":
203 # for "historical portability":
204 # define parsealiases locally, because cmdutil.parsealiases has been
204 # define parsealiases locally, because cmdutil.parsealiases has been
205 # available since 1.5 (or 6252852b4332)
205 # available since 1.5 (or 6252852b4332)
206 def parsealiases(cmd):
206 def parsealiases(cmd):
207 return cmd.split(b"|")
207 return cmd.split(b"|")
208
208
209 if safehasattr(registrar, 'command'):
209 if safehasattr(registrar, 'command'):
210 command = registrar.command(cmdtable)
210 command = registrar.command(cmdtable)
211 elif safehasattr(cmdutil, 'command'):
211 elif safehasattr(cmdutil, 'command'):
212 command = cmdutil.command(cmdtable)
212 command = cmdutil.command(cmdtable)
213 if b'norepo' not in getargspec(command).args:
213 if b'norepo' not in getargspec(command).args:
214 # for "historical portability":
214 # for "historical portability":
215 # wrap original cmdutil.command, because "norepo" option has
215 # wrap original cmdutil.command, because "norepo" option has
216 # been available since 3.1 (or 75a96326cecb)
216 # been available since 3.1 (or 75a96326cecb)
217 _command = command
217 _command = command
218 def command(name, options=(), synopsis=None, norepo=False):
218 def command(name, options=(), synopsis=None, norepo=False):
219 if norepo:
219 if norepo:
220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 return _command(name, list(options), synopsis)
221 return _command(name, list(options), synopsis)
222 else:
222 else:
223 # for "historical portability":
223 # for "historical portability":
224 # define "@command" annotation locally, because cmdutil.command
224 # define "@command" annotation locally, because cmdutil.command
225 # has been available since 1.9 (or 2daa5179e73f)
225 # has been available since 1.9 (or 2daa5179e73f)
226 def command(name, options=(), synopsis=None, norepo=False):
226 def command(name, options=(), synopsis=None, norepo=False):
227 def decorator(func):
227 def decorator(func):
228 if synopsis:
228 if synopsis:
229 cmdtable[name] = func, list(options), synopsis
229 cmdtable[name] = func, list(options), synopsis
230 else:
230 else:
231 cmdtable[name] = func, list(options)
231 cmdtable[name] = func, list(options)
232 if norepo:
232 if norepo:
233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 return func
234 return func
235 return decorator
235 return decorator
236
236
237 try:
237 try:
238 import mercurial.registrar
238 import mercurial.registrar
239 import mercurial.configitems
239 import mercurial.configitems
240 configtable = {}
240 configtable = {}
241 configitem = mercurial.registrar.configitem(configtable)
241 configitem = mercurial.registrar.configitem(configtable)
242 configitem(b'perf', b'presleep',
242 configitem(b'perf', b'presleep',
243 default=mercurial.configitems.dynamicdefault,
243 default=mercurial.configitems.dynamicdefault,
244 )
244 )
245 configitem(b'perf', b'stub',
245 configitem(b'perf', b'stub',
246 default=mercurial.configitems.dynamicdefault,
246 default=mercurial.configitems.dynamicdefault,
247 )
247 )
248 configitem(b'perf', b'parentscount',
248 configitem(b'perf', b'parentscount',
249 default=mercurial.configitems.dynamicdefault,
249 default=mercurial.configitems.dynamicdefault,
250 )
250 )
251 configitem(b'perf', b'all-timing',
251 configitem(b'perf', b'all-timing',
252 default=mercurial.configitems.dynamicdefault,
252 default=mercurial.configitems.dynamicdefault,
253 )
253 )
254 configitem(b'perf', b'pre-run',
254 configitem(b'perf', b'pre-run',
255 default=mercurial.configitems.dynamicdefault,
255 default=mercurial.configitems.dynamicdefault,
256 )
256 )
257 configitem(b'perf', b'profile-benchmark',
257 configitem(b'perf', b'profile-benchmark',
258 default=mercurial.configitems.dynamicdefault,
258 default=mercurial.configitems.dynamicdefault,
259 )
259 )
260 configitem(b'perf', b'run-limits',
260 configitem(b'perf', b'run-limits',
261 default=mercurial.configitems.dynamicdefault,
261 default=mercurial.configitems.dynamicdefault,
262 )
262 )
263 except (ImportError, AttributeError):
263 except (ImportError, AttributeError):
264 pass
264 pass
265
265
266 def getlen(ui):
266 def getlen(ui):
267 if ui.configbool(b"perf", b"stub", False):
267 if ui.configbool(b"perf", b"stub", False):
268 return lambda x: 1
268 return lambda x: 1
269 return len
269 return len
270
270
271 class noop(object):
271 class noop(object):
272 """dummy context manager"""
272 """dummy context manager"""
273 def __enter__(self):
273 def __enter__(self):
274 pass
274 pass
275 def __exit__(self, *args):
275 def __exit__(self, *args):
276 pass
276 pass
277
277
278 NOOPCTX = noop()
278 NOOPCTX = noop()
279
279
280 def gettimer(ui, opts=None):
280 def gettimer(ui, opts=None):
281 """return a timer function and formatter: (timer, formatter)
281 """return a timer function and formatter: (timer, formatter)
282
282
283 This function exists to gather the creation of formatter in a single
283 This function exists to gather the creation of formatter in a single
284 place instead of duplicating it in all performance commands."""
284 place instead of duplicating it in all performance commands."""
285
285
286 # enforce an idle period before execution to counteract power management
286 # enforce an idle period before execution to counteract power management
287 # experimental config: perf.presleep
287 # experimental config: perf.presleep
288 time.sleep(getint(ui, b"perf", b"presleep", 1))
288 time.sleep(getint(ui, b"perf", b"presleep", 1))
289
289
290 if opts is None:
290 if opts is None:
291 opts = {}
291 opts = {}
292 # redirect all to stderr unless buffer api is in use
292 # redirect all to stderr unless buffer api is in use
293 if not ui._buffers:
293 if not ui._buffers:
294 ui = ui.copy()
294 ui = ui.copy()
295 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
295 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
296 if uifout:
296 if uifout:
297 # for "historical portability":
297 # for "historical portability":
298 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
298 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
299 uifout.set(ui.ferr)
299 uifout.set(ui.ferr)
300
300
301 # get a formatter
301 # get a formatter
302 uiformatter = getattr(ui, 'formatter', None)
302 uiformatter = getattr(ui, 'formatter', None)
303 if uiformatter:
303 if uiformatter:
304 fm = uiformatter(b'perf', opts)
304 fm = uiformatter(b'perf', opts)
305 else:
305 else:
306 # for "historical portability":
306 # for "historical portability":
307 # define formatter locally, because ui.formatter has been
307 # define formatter locally, because ui.formatter has been
308 # available since 2.2 (or ae5f92e154d3)
308 # available since 2.2 (or ae5f92e154d3)
309 from mercurial import node
309 from mercurial import node
310 class defaultformatter(object):
310 class defaultformatter(object):
311 """Minimized composition of baseformatter and plainformatter
311 """Minimized composition of baseformatter and plainformatter
312 """
312 """
313 def __init__(self, ui, topic, opts):
313 def __init__(self, ui, topic, opts):
314 self._ui = ui
314 self._ui = ui
315 if ui.debugflag:
315 if ui.debugflag:
316 self.hexfunc = node.hex
316 self.hexfunc = node.hex
317 else:
317 else:
318 self.hexfunc = node.short
318 self.hexfunc = node.short
319 def __nonzero__(self):
319 def __nonzero__(self):
320 return False
320 return False
321 __bool__ = __nonzero__
321 __bool__ = __nonzero__
322 def startitem(self):
322 def startitem(self):
323 pass
323 pass
324 def data(self, **data):
324 def data(self, **data):
325 pass
325 pass
326 def write(self, fields, deftext, *fielddata, **opts):
326 def write(self, fields, deftext, *fielddata, **opts):
327 self._ui.write(deftext % fielddata, **opts)
327 self._ui.write(deftext % fielddata, **opts)
328 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
328 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
329 if cond:
329 if cond:
330 self._ui.write(deftext % fielddata, **opts)
330 self._ui.write(deftext % fielddata, **opts)
331 def plain(self, text, **opts):
331 def plain(self, text, **opts):
332 self._ui.write(text, **opts)
332 self._ui.write(text, **opts)
333 def end(self):
333 def end(self):
334 pass
334 pass
335 fm = defaultformatter(ui, b'perf', opts)
335 fm = defaultformatter(ui, b'perf', opts)
336
336
337 # stub function, runs code only once instead of in a loop
337 # stub function, runs code only once instead of in a loop
338 # experimental config: perf.stub
338 # experimental config: perf.stub
339 if ui.configbool(b"perf", b"stub", False):
339 if ui.configbool(b"perf", b"stub", False):
340 return functools.partial(stub_timer, fm), fm
340 return functools.partial(stub_timer, fm), fm
341
341
342 # experimental config: perf.all-timing
342 # experimental config: perf.all-timing
343 displayall = ui.configbool(b"perf", b"all-timing", False)
343 displayall = ui.configbool(b"perf", b"all-timing", False)
344
344
345 # experimental config: perf.run-limits
345 # experimental config: perf.run-limits
346 limitspec = ui.configlist(b"perf", b"run-limits", [])
346 limitspec = ui.configlist(b"perf", b"run-limits", [])
347 limits = []
347 limits = []
348 for item in limitspec:
348 for item in limitspec:
349 parts = item.split(b'-', 1)
349 parts = item.split(b'-', 1)
350 if len(parts) < 2:
350 if len(parts) < 2:
351 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
351 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
352 % item))
352 % item))
353 continue
353 continue
354 try:
354 try:
355 time_limit = float(pycompat.sysstr(parts[0]))
355 time_limit = float(pycompat.sysstr(parts[0]))
356 except ValueError as e:
356 except ValueError as e:
357 ui.warn((b'malformatted run limit entry, %s: %s\n'
357 ui.warn((b'malformatted run limit entry, %s: %s\n'
358 % (pycompat.bytestr(e), item)))
358 % (pycompat.bytestr(e), item)))
359 continue
359 continue
360 try:
360 try:
361 run_limit = int(pycompat.sysstr(parts[1]))
361 run_limit = int(pycompat.sysstr(parts[1]))
362 except ValueError as e:
362 except ValueError as e:
363 ui.warn((b'malformatted run limit entry, %s: %s\n'
363 ui.warn((b'malformatted run limit entry, %s: %s\n'
364 % (pycompat.bytestr(e), item)))
364 % (pycompat.bytestr(e), item)))
365 continue
365 continue
366 limits.append((time_limit, run_limit))
366 limits.append((time_limit, run_limit))
367 if not limits:
367 if not limits:
368 limits = DEFAULTLIMITS
368 limits = DEFAULTLIMITS
369
369
370 profiler = None
370 profiler = None
371 if profiling is not None:
371 if profiling is not None:
372 if ui.configbool(b"perf", b"profile-benchmark", False):
372 if ui.configbool(b"perf", b"profile-benchmark", False):
373 profiler = profiling.profile(ui)
373 profiler = profiling.profile(ui)
374
374
375 prerun = getint(ui, b"perf", b"pre-run", 0)
375 prerun = getint(ui, b"perf", b"pre-run", 0)
376 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
376 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
377 prerun=prerun, profiler=profiler)
377 prerun=prerun, profiler=profiler)
378 return t, fm
378 return t, fm
379
379
380 def stub_timer(fm, func, setup=None, title=None):
380 def stub_timer(fm, func, setup=None, title=None):
381 if setup is not None:
381 if setup is not None:
382 setup()
382 setup()
383 func()
383 func()
384
384
385 @contextlib.contextmanager
385 @contextlib.contextmanager
386 def timeone():
386 def timeone():
387 r = []
387 r = []
388 ostart = os.times()
388 ostart = os.times()
389 cstart = util.timer()
389 cstart = util.timer()
390 yield r
390 yield r
391 cstop = util.timer()
391 cstop = util.timer()
392 ostop = os.times()
392 ostop = os.times()
393 a, b = ostart, ostop
393 a, b = ostart, ostop
394 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
394 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
395
395
396
396
397 # list of stop condition (elapsed time, minimal run count)
397 # list of stop condition (elapsed time, minimal run count)
398 DEFAULTLIMITS = (
398 DEFAULTLIMITS = (
399 (3.0, 100),
399 (3.0, 100),
400 (10.0, 3),
400 (10.0, 3),
401 )
401 )
402
402
403 def _timer(fm, func, setup=None, title=None, displayall=False,
403 def _timer(fm, func, setup=None, title=None, displayall=False,
404 limits=DEFAULTLIMITS, prerun=0, profiler=None):
404 limits=DEFAULTLIMITS, prerun=0, profiler=None):
405 gc.collect()
405 gc.collect()
406 results = []
406 results = []
407 begin = util.timer()
407 begin = util.timer()
408 count = 0
408 count = 0
409 if profiler is None:
409 if profiler is None:
410 profiler = NOOPCTX
410 profiler = NOOPCTX
411 for i in range(prerun):
411 for i in range(prerun):
412 if setup is not None:
412 if setup is not None:
413 setup()
413 setup()
414 func()
414 func()
415 keepgoing = True
415 keepgoing = True
416 while keepgoing:
416 while keepgoing:
417 if setup is not None:
417 if setup is not None:
418 setup()
418 setup()
419 with profiler:
419 with profiler:
420 with timeone() as item:
420 with timeone() as item:
421 r = func()
421 r = func()
422 profiler = NOOPCTX
422 profiler = NOOPCTX
423 count += 1
423 count += 1
424 results.append(item[0])
424 results.append(item[0])
425 cstop = util.timer()
425 cstop = util.timer()
426 # Look for a stop condition.
426 # Look for a stop condition.
427 elapsed = cstop - begin
427 elapsed = cstop - begin
428 for t, mincount in limits:
428 for t, mincount in limits:
429 if elapsed >= t and count >= mincount:
429 if elapsed >= t and count >= mincount:
430 keepgoing = False
430 keepgoing = False
431 break
431 break
432
432
433 formatone(fm, results, title=title, result=r,
433 formatone(fm, results, title=title, result=r,
434 displayall=displayall)
434 displayall=displayall)
435
435
436 def formatone(fm, timings, title=None, result=None, displayall=False):
436 def formatone(fm, timings, title=None, result=None, displayall=False):
437
437
438 count = len(timings)
438 count = len(timings)
439
439
440 fm.startitem()
440 fm.startitem()
441
441
442 if title:
442 if title:
443 fm.write(b'title', b'! %s\n', title)
443 fm.write(b'title', b'! %s\n', title)
444 if result:
444 if result:
445 fm.write(b'result', b'! result: %s\n', result)
445 fm.write(b'result', b'! result: %s\n', result)
446 def display(role, entry):
446 def display(role, entry):
447 prefix = b''
447 prefix = b''
448 if role != b'best':
448 if role != b'best':
449 prefix = b'%s.' % role
449 prefix = b'%s.' % role
450 fm.plain(b'!')
450 fm.plain(b'!')
451 fm.write(prefix + b'wall', b' wall %f', entry[0])
451 fm.write(prefix + b'wall', b' wall %f', entry[0])
452 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
452 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
453 fm.write(prefix + b'user', b' user %f', entry[1])
453 fm.write(prefix + b'user', b' user %f', entry[1])
454 fm.write(prefix + b'sys', b' sys %f', entry[2])
454 fm.write(prefix + b'sys', b' sys %f', entry[2])
455 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
455 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
456 fm.plain(b'\n')
456 fm.plain(b'\n')
457 timings.sort()
457 timings.sort()
458 min_val = timings[0]
458 min_val = timings[0]
459 display(b'best', min_val)
459 display(b'best', min_val)
460 if displayall:
460 if displayall:
461 max_val = timings[-1]
461 max_val = timings[-1]
462 display(b'max', max_val)
462 display(b'max', max_val)
463 avg = tuple([sum(x) / count for x in zip(*timings)])
463 avg = tuple([sum(x) / count for x in zip(*timings)])
464 display(b'avg', avg)
464 display(b'avg', avg)
465 median = timings[len(timings) // 2]
465 median = timings[len(timings) // 2]
466 display(b'median', median)
466 display(b'median', median)
467
467
468 # utilities for historical portability
468 # utilities for historical portability
469
469
470 def getint(ui, section, name, default):
470 def getint(ui, section, name, default):
471 # for "historical portability":
471 # for "historical portability":
472 # ui.configint has been available since 1.9 (or fa2b596db182)
472 # ui.configint has been available since 1.9 (or fa2b596db182)
473 v = ui.config(section, name, None)
473 v = ui.config(section, name, None)
474 if v is None:
474 if v is None:
475 return default
475 return default
476 try:
476 try:
477 return int(v)
477 return int(v)
478 except ValueError:
478 except ValueError:
479 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
479 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
480 % (section, name, v))
480 % (section, name, v))
481
481
482 def safeattrsetter(obj, name, ignoremissing=False):
482 def safeattrsetter(obj, name, ignoremissing=False):
483 """Ensure that 'obj' has 'name' attribute before subsequent setattr
483 """Ensure that 'obj' has 'name' attribute before subsequent setattr
484
484
485 This function is aborted, if 'obj' doesn't have 'name' attribute
485 This function is aborted, if 'obj' doesn't have 'name' attribute
486 at runtime. This avoids overlooking removal of an attribute, which
486 at runtime. This avoids overlooking removal of an attribute, which
487 breaks assumption of performance measurement, in the future.
487 breaks assumption of performance measurement, in the future.
488
488
489 This function returns the object to (1) assign a new value, and
489 This function returns the object to (1) assign a new value, and
490 (2) restore an original value to the attribute.
490 (2) restore an original value to the attribute.
491
491
492 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
492 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
493 abortion, and this function returns None. This is useful to
493 abortion, and this function returns None. This is useful to
494 examine an attribute, which isn't ensured in all Mercurial
494 examine an attribute, which isn't ensured in all Mercurial
495 versions.
495 versions.
496 """
496 """
497 if not util.safehasattr(obj, name):
497 if not util.safehasattr(obj, name):
498 if ignoremissing:
498 if ignoremissing:
499 return None
499 return None
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
501 b" of performance measurement") % (name, obj))
501 b" of performance measurement") % (name, obj))
502
502
503 origvalue = getattr(obj, _sysstr(name))
503 origvalue = getattr(obj, _sysstr(name))
504 class attrutil(object):
504 class attrutil(object):
505 def set(self, newvalue):
505 def set(self, newvalue):
506 setattr(obj, _sysstr(name), newvalue)
506 setattr(obj, _sysstr(name), newvalue)
507 def restore(self):
507 def restore(self):
508 setattr(obj, _sysstr(name), origvalue)
508 setattr(obj, _sysstr(name), origvalue)
509
509
510 return attrutil()
510 return attrutil()
511
511
512 # utilities to examine each internal API changes
512 # utilities to examine each internal API changes
513
513
514 def getbranchmapsubsettable():
514 def getbranchmapsubsettable():
515 # for "historical portability":
515 # for "historical portability":
516 # subsettable is defined in:
516 # subsettable is defined in:
517 # - branchmap since 2.9 (or 175c6fd8cacc)
517 # - branchmap since 2.9 (or 175c6fd8cacc)
518 # - repoview since 2.5 (or 59a9f18d4587)
518 # - repoview since 2.5 (or 59a9f18d4587)
519 # - repoviewutil since 5.0
519 # - repoviewutil since 5.0
520 for mod in (branchmap, repoview, repoviewutil):
520 for mod in (branchmap, repoview, repoviewutil):
521 subsettable = getattr(mod, 'subsettable', None)
521 subsettable = getattr(mod, 'subsettable', None)
522 if subsettable:
522 if subsettable:
523 return subsettable
523 return subsettable
524
524
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
526 # branchmap and repoview modules exist, but subsettable attribute
526 # branchmap and repoview modules exist, but subsettable attribute
527 # doesn't)
527 # doesn't)
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
529 hint=b"use 2.5 or later")
529 hint=b"use 2.5 or later")
530
530
531 def getsvfs(repo):
531 def getsvfs(repo):
532 """Return appropriate object to access files under .hg/store
532 """Return appropriate object to access files under .hg/store
533 """
533 """
534 # for "historical portability":
534 # for "historical portability":
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
536 svfs = getattr(repo, 'svfs', None)
536 svfs = getattr(repo, 'svfs', None)
537 if svfs:
537 if svfs:
538 return svfs
538 return svfs
539 else:
539 else:
540 return getattr(repo, 'sopener')
540 return getattr(repo, 'sopener')
541
541
542 def getvfs(repo):
542 def getvfs(repo):
543 """Return appropriate object to access files under .hg
543 """Return appropriate object to access files under .hg
544 """
544 """
545 # for "historical portability":
545 # for "historical portability":
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
547 vfs = getattr(repo, 'vfs', None)
547 vfs = getattr(repo, 'vfs', None)
548 if vfs:
548 if vfs:
549 return vfs
549 return vfs
550 else:
550 else:
551 return getattr(repo, 'opener')
551 return getattr(repo, 'opener')
552
552
553 def repocleartagscachefunc(repo):
553 def repocleartagscachefunc(repo):
554 """Return the function to clear tags cache according to repo internal API
554 """Return the function to clear tags cache according to repo internal API
555 """
555 """
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
558 # correct way to clear tags cache, because existing code paths
558 # correct way to clear tags cache, because existing code paths
559 # expect _tagscache to be a structured object.
559 # expect _tagscache to be a structured object.
560 def clearcache():
560 def clearcache():
561 # _tagscache has been filteredpropertycache since 2.5 (or
561 # _tagscache has been filteredpropertycache since 2.5 (or
562 # 98c867ac1330), and delattr() can't work in such case
562 # 98c867ac1330), and delattr() can't work in such case
563 if b'_tagscache' in vars(repo):
563 if b'_tagscache' in vars(repo):
564 del repo.__dict__[b'_tagscache']
564 del repo.__dict__[b'_tagscache']
565 return clearcache
565 return clearcache
566
566
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
568 if repotags: # since 1.4 (or 5614a628d173)
568 if repotags: # since 1.4 (or 5614a628d173)
569 return lambda : repotags.set(None)
569 return lambda : repotags.set(None)
570
570
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
573 return lambda : repotagscache.set(None)
573 return lambda : repotagscache.set(None)
574
574
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
576 # this point, but it isn't so problematic, because:
576 # this point, but it isn't so problematic, because:
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
578 # in perftags() causes failure soon
578 # in perftags() causes failure soon
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
580 raise error.Abort((b"tags API of this hg command is unknown"))
580 raise error.Abort((b"tags API of this hg command is unknown"))
581
581
582 # utilities to clear cache
582 # utilities to clear cache
583
583
584 def clearfilecache(obj, attrname):
584 def clearfilecache(obj, attrname):
585 unfiltered = getattr(obj, 'unfiltered', None)
585 unfiltered = getattr(obj, 'unfiltered', None)
586 if unfiltered is not None:
586 if unfiltered is not None:
587 obj = obj.unfiltered()
587 obj = obj.unfiltered()
588 if attrname in vars(obj):
588 if attrname in vars(obj):
589 delattr(obj, attrname)
589 delattr(obj, attrname)
590 obj._filecache.pop(attrname, None)
590 obj._filecache.pop(attrname, None)
591
591
592 def clearchangelog(repo):
592 def clearchangelog(repo):
593 if repo is not repo.unfiltered():
593 if repo is not repo.unfiltered():
594 object.__setattr__(repo, r'_clcachekey', None)
594 object.__setattr__(repo, r'_clcachekey', None)
595 object.__setattr__(repo, r'_clcache', None)
595 object.__setattr__(repo, r'_clcache', None)
596 clearfilecache(repo.unfiltered(), 'changelog')
596 clearfilecache(repo.unfiltered(), 'changelog')
597
597
598 # perf commands
598 # perf commands
599
599
600 @command(b'perfwalk', formatteropts)
600 @command(b'perfwalk', formatteropts)
601 def perfwalk(ui, repo, *pats, **opts):
601 def perfwalk(ui, repo, *pats, **opts):
602 opts = _byteskwargs(opts)
602 opts = _byteskwargs(opts)
603 timer, fm = gettimer(ui, opts)
603 timer, fm = gettimer(ui, opts)
604 m = scmutil.match(repo[None], pats, {})
604 m = scmutil.match(repo[None], pats, {})
605 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
605 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
606 ignored=False))))
606 ignored=False))))
607 fm.end()
607 fm.end()
608
608
609 @command(b'perfannotate', formatteropts)
609 @command(b'perfannotate', formatteropts)
610 def perfannotate(ui, repo, f, **opts):
610 def perfannotate(ui, repo, f, **opts):
611 opts = _byteskwargs(opts)
611 opts = _byteskwargs(opts)
612 timer, fm = gettimer(ui, opts)
612 timer, fm = gettimer(ui, opts)
613 fc = repo[b'.'][f]
613 fc = repo[b'.'][f]
614 timer(lambda: len(fc.annotate(True)))
614 timer(lambda: len(fc.annotate(True)))
615 fm.end()
615 fm.end()
616
616
617 @command(b'perfstatus',
617 @command(b'perfstatus',
618 [(b'u', b'unknown', False,
618 [(b'u', b'unknown', False,
619 b'ask status to look for unknown files')] + formatteropts)
619 b'ask status to look for unknown files')] + formatteropts)
620 def perfstatus(ui, repo, **opts):
620 def perfstatus(ui, repo, **opts):
621 opts = _byteskwargs(opts)
621 opts = _byteskwargs(opts)
622 #m = match.always(repo.root, repo.getcwd())
622 #m = match.always(repo.root, repo.getcwd())
623 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
623 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
624 # False))))
624 # False))))
625 timer, fm = gettimer(ui, opts)
625 timer, fm = gettimer(ui, opts)
626 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
626 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
627 fm.end()
627 fm.end()
628
628
629 @command(b'perfaddremove', formatteropts)
629 @command(b'perfaddremove', formatteropts)
630 def perfaddremove(ui, repo, **opts):
630 def perfaddremove(ui, repo, **opts):
631 opts = _byteskwargs(opts)
631 opts = _byteskwargs(opts)
632 timer, fm = gettimer(ui, opts)
632 timer, fm = gettimer(ui, opts)
633 try:
633 try:
634 oldquiet = repo.ui.quiet
634 oldquiet = repo.ui.quiet
635 repo.ui.quiet = True
635 repo.ui.quiet = True
636 matcher = scmutil.match(repo[None])
636 matcher = scmutil.match(repo[None])
637 opts[b'dry_run'] = True
637 opts[b'dry_run'] = True
638 if b'uipathfn' in getargspec(scmutil.addremove).args:
638 if b'uipathfn' in getargspec(scmutil.addremove).args:
639 uipathfn = scmutil.getuipathfn(repo)
639 uipathfn = scmutil.getuipathfn(repo)
640 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
640 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
641 else:
641 else:
642 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
642 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
643 finally:
643 finally:
644 repo.ui.quiet = oldquiet
644 repo.ui.quiet = oldquiet
645 fm.end()
645 fm.end()
646
646
647 def clearcaches(cl):
647 def clearcaches(cl):
648 # behave somewhat consistently across internal API changes
648 # behave somewhat consistently across internal API changes
649 if util.safehasattr(cl, b'clearcaches'):
649 if util.safehasattr(cl, b'clearcaches'):
650 cl.clearcaches()
650 cl.clearcaches()
651 elif util.safehasattr(cl, b'_nodecache'):
651 elif util.safehasattr(cl, b'_nodecache'):
652 from mercurial.node import nullid, nullrev
652 from mercurial.node import nullid, nullrev
653 cl._nodecache = {nullid: nullrev}
653 cl._nodecache = {nullid: nullrev}
654 cl._nodepos = None
654 cl._nodepos = None
655
655
656 @command(b'perfheads', formatteropts)
656 @command(b'perfheads', formatteropts)
657 def perfheads(ui, repo, **opts):
657 def perfheads(ui, repo, **opts):
658 """benchmark the computation of a changelog heads"""
658 """benchmark the computation of a changelog heads"""
659 opts = _byteskwargs(opts)
659 opts = _byteskwargs(opts)
660 timer, fm = gettimer(ui, opts)
660 timer, fm = gettimer(ui, opts)
661 cl = repo.changelog
661 cl = repo.changelog
662 def s():
662 def s():
663 clearcaches(cl)
663 clearcaches(cl)
664 def d():
664 def d():
665 len(cl.headrevs())
665 len(cl.headrevs())
666 timer(d, setup=s)
666 timer(d, setup=s)
667 fm.end()
667 fm.end()
668
668
669 @command(b'perftags', formatteropts+
669 @command(b'perftags', formatteropts+
670 [
670 [
671 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
671 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
672 ])
672 ])
673 def perftags(ui, repo, **opts):
673 def perftags(ui, repo, **opts):
674 opts = _byteskwargs(opts)
674 opts = _byteskwargs(opts)
675 timer, fm = gettimer(ui, opts)
675 timer, fm = gettimer(ui, opts)
676 repocleartagscache = repocleartagscachefunc(repo)
676 repocleartagscache = repocleartagscachefunc(repo)
677 clearrevlogs = opts[b'clear_revlogs']
677 clearrevlogs = opts[b'clear_revlogs']
678 def s():
678 def s():
679 if clearrevlogs:
679 if clearrevlogs:
680 clearchangelog(repo)
680 clearchangelog(repo)
681 clearfilecache(repo.unfiltered(), 'manifest')
681 clearfilecache(repo.unfiltered(), 'manifest')
682 repocleartagscache()
682 repocleartagscache()
683 def t():
683 def t():
684 return len(repo.tags())
684 return len(repo.tags())
685 timer(t, setup=s)
685 timer(t, setup=s)
686 fm.end()
686 fm.end()
687
687
688 @command(b'perfancestors', formatteropts)
688 @command(b'perfancestors', formatteropts)
689 def perfancestors(ui, repo, **opts):
689 def perfancestors(ui, repo, **opts):
690 opts = _byteskwargs(opts)
690 opts = _byteskwargs(opts)
691 timer, fm = gettimer(ui, opts)
691 timer, fm = gettimer(ui, opts)
692 heads = repo.changelog.headrevs()
692 heads = repo.changelog.headrevs()
693 def d():
693 def d():
694 for a in repo.changelog.ancestors(heads):
694 for a in repo.changelog.ancestors(heads):
695 pass
695 pass
696 timer(d)
696 timer(d)
697 fm.end()
697 fm.end()
698
698
699 @command(b'perfancestorset', formatteropts)
699 @command(b'perfancestorset', formatteropts)
700 def perfancestorset(ui, repo, revset, **opts):
700 def perfancestorset(ui, repo, revset, **opts):
701 opts = _byteskwargs(opts)
701 opts = _byteskwargs(opts)
702 timer, fm = gettimer(ui, opts)
702 timer, fm = gettimer(ui, opts)
703 revs = repo.revs(revset)
703 revs = repo.revs(revset)
704 heads = repo.changelog.headrevs()
704 heads = repo.changelog.headrevs()
705 def d():
705 def d():
706 s = repo.changelog.ancestors(heads)
706 s = repo.changelog.ancestors(heads)
707 for rev in revs:
707 for rev in revs:
708 rev in s
708 rev in s
709 timer(d)
709 timer(d)
710 fm.end()
710 fm.end()
711
711
712 @command(b'perfdiscovery', formatteropts, b'PATH')
712 @command(b'perfdiscovery', formatteropts, b'PATH')
713 def perfdiscovery(ui, repo, path, **opts):
713 def perfdiscovery(ui, repo, path, **opts):
714 """benchmark discovery between local repo and the peer at given path
714 """benchmark discovery between local repo and the peer at given path
715 """
715 """
716 repos = [repo, None]
716 repos = [repo, None]
717 timer, fm = gettimer(ui, opts)
717 timer, fm = gettimer(ui, opts)
718 path = ui.expandpath(path)
718 path = ui.expandpath(path)
719
719
720 def s():
720 def s():
721 repos[1] = hg.peer(ui, opts, path)
721 repos[1] = hg.peer(ui, opts, path)
722 def d():
722 def d():
723 setdiscovery.findcommonheads(ui, *repos)
723 setdiscovery.findcommonheads(ui, *repos)
724 timer(d, setup=s)
724 timer(d, setup=s)
725 fm.end()
725 fm.end()
726
726
727 @command(b'perfbookmarks', formatteropts +
727 @command(b'perfbookmarks', formatteropts +
728 [
728 [
729 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
729 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
730 ])
730 ])
731 def perfbookmarks(ui, repo, **opts):
731 def perfbookmarks(ui, repo, **opts):
732 """benchmark parsing bookmarks from disk to memory"""
732 """benchmark parsing bookmarks from disk to memory"""
733 opts = _byteskwargs(opts)
733 opts = _byteskwargs(opts)
734 timer, fm = gettimer(ui, opts)
734 timer, fm = gettimer(ui, opts)
735
735
736 clearrevlogs = opts[b'clear_revlogs']
736 clearrevlogs = opts[b'clear_revlogs']
737 def s():
737 def s():
738 if clearrevlogs:
738 if clearrevlogs:
739 clearchangelog(repo)
739 clearchangelog(repo)
740 clearfilecache(repo, b'_bookmarks')
740 clearfilecache(repo, b'_bookmarks')
741 def d():
741 def d():
742 repo._bookmarks
742 repo._bookmarks
743 timer(d, setup=s)
743 timer(d, setup=s)
744 fm.end()
744 fm.end()
745
745
746 @command(b'perfbundleread', formatteropts, b'BUNDLE')
746 @command(b'perfbundleread', formatteropts, b'BUNDLE')
747 def perfbundleread(ui, repo, bundlepath, **opts):
747 def perfbundleread(ui, repo, bundlepath, **opts):
748 """Benchmark reading of bundle files.
748 """Benchmark reading of bundle files.
749
749
750 This command is meant to isolate the I/O part of bundle reading as
750 This command is meant to isolate the I/O part of bundle reading as
751 much as possible.
751 much as possible.
752 """
752 """
753 from mercurial import (
753 from mercurial import (
754 bundle2,
754 bundle2,
755 exchange,
755 exchange,
756 streamclone,
756 streamclone,
757 )
757 )
758
758
759 opts = _byteskwargs(opts)
759 opts = _byteskwargs(opts)
760
760
761 def makebench(fn):
761 def makebench(fn):
762 def run():
762 def run():
763 with open(bundlepath, b'rb') as fh:
763 with open(bundlepath, b'rb') as fh:
764 bundle = exchange.readbundle(ui, fh, bundlepath)
764 bundle = exchange.readbundle(ui, fh, bundlepath)
765 fn(bundle)
765 fn(bundle)
766
766
767 return run
767 return run
768
768
769 def makereadnbytes(size):
769 def makereadnbytes(size):
770 def run():
770 def run():
771 with open(bundlepath, b'rb') as fh:
771 with open(bundlepath, b'rb') as fh:
772 bundle = exchange.readbundle(ui, fh, bundlepath)
772 bundle = exchange.readbundle(ui, fh, bundlepath)
773 while bundle.read(size):
773 while bundle.read(size):
774 pass
774 pass
775
775
776 return run
776 return run
777
777
778 def makestdioread(size):
778 def makestdioread(size):
779 def run():
779 def run():
780 with open(bundlepath, b'rb') as fh:
780 with open(bundlepath, b'rb') as fh:
781 while fh.read(size):
781 while fh.read(size):
782 pass
782 pass
783
783
784 return run
784 return run
785
785
786 # bundle1
786 # bundle1
787
787
788 def deltaiter(bundle):
788 def deltaiter(bundle):
789 for delta in bundle.deltaiter():
789 for delta in bundle.deltaiter():
790 pass
790 pass
791
791
792 def iterchunks(bundle):
792 def iterchunks(bundle):
793 for chunk in bundle.getchunks():
793 for chunk in bundle.getchunks():
794 pass
794 pass
795
795
796 # bundle2
796 # bundle2
797
797
798 def forwardchunks(bundle):
798 def forwardchunks(bundle):
799 for chunk in bundle._forwardchunks():
799 for chunk in bundle._forwardchunks():
800 pass
800 pass
801
801
802 def iterparts(bundle):
802 def iterparts(bundle):
803 for part in bundle.iterparts():
803 for part in bundle.iterparts():
804 pass
804 pass
805
805
806 def iterpartsseekable(bundle):
806 def iterpartsseekable(bundle):
807 for part in bundle.iterparts(seekable=True):
807 for part in bundle.iterparts(seekable=True):
808 pass
808 pass
809
809
810 def seek(bundle):
810 def seek(bundle):
811 for part in bundle.iterparts(seekable=True):
811 for part in bundle.iterparts(seekable=True):
812 part.seek(0, os.SEEK_END)
812 part.seek(0, os.SEEK_END)
813
813
814 def makepartreadnbytes(size):
814 def makepartreadnbytes(size):
815 def run():
815 def run():
816 with open(bundlepath, b'rb') as fh:
816 with open(bundlepath, b'rb') as fh:
817 bundle = exchange.readbundle(ui, fh, bundlepath)
817 bundle = exchange.readbundle(ui, fh, bundlepath)
818 for part in bundle.iterparts():
818 for part in bundle.iterparts():
819 while part.read(size):
819 while part.read(size):
820 pass
820 pass
821
821
822 return run
822 return run
823
823
824 benches = [
824 benches = [
825 (makestdioread(8192), b'read(8k)'),
825 (makestdioread(8192), b'read(8k)'),
826 (makestdioread(16384), b'read(16k)'),
826 (makestdioread(16384), b'read(16k)'),
827 (makestdioread(32768), b'read(32k)'),
827 (makestdioread(32768), b'read(32k)'),
828 (makestdioread(131072), b'read(128k)'),
828 (makestdioread(131072), b'read(128k)'),
829 ]
829 ]
830
830
831 with open(bundlepath, b'rb') as fh:
831 with open(bundlepath, b'rb') as fh:
832 bundle = exchange.readbundle(ui, fh, bundlepath)
832 bundle = exchange.readbundle(ui, fh, bundlepath)
833
833
834 if isinstance(bundle, changegroup.cg1unpacker):
834 if isinstance(bundle, changegroup.cg1unpacker):
835 benches.extend([
835 benches.extend([
836 (makebench(deltaiter), b'cg1 deltaiter()'),
836 (makebench(deltaiter), b'cg1 deltaiter()'),
837 (makebench(iterchunks), b'cg1 getchunks()'),
837 (makebench(iterchunks), b'cg1 getchunks()'),
838 (makereadnbytes(8192), b'cg1 read(8k)'),
838 (makereadnbytes(8192), b'cg1 read(8k)'),
839 (makereadnbytes(16384), b'cg1 read(16k)'),
839 (makereadnbytes(16384), b'cg1 read(16k)'),
840 (makereadnbytes(32768), b'cg1 read(32k)'),
840 (makereadnbytes(32768), b'cg1 read(32k)'),
841 (makereadnbytes(131072), b'cg1 read(128k)'),
841 (makereadnbytes(131072), b'cg1 read(128k)'),
842 ])
842 ])
843 elif isinstance(bundle, bundle2.unbundle20):
843 elif isinstance(bundle, bundle2.unbundle20):
844 benches.extend([
844 benches.extend([
845 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
845 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
846 (makebench(iterparts), b'bundle2 iterparts()'),
846 (makebench(iterparts), b'bundle2 iterparts()'),
847 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
847 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
848 (makebench(seek), b'bundle2 part seek()'),
848 (makebench(seek), b'bundle2 part seek()'),
849 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
849 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
850 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
850 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
851 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
851 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
852 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
852 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
853 ])
853 ])
854 elif isinstance(bundle, streamclone.streamcloneapplier):
854 elif isinstance(bundle, streamclone.streamcloneapplier):
855 raise error.Abort(b'stream clone bundles not supported')
855 raise error.Abort(b'stream clone bundles not supported')
856 else:
856 else:
857 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
857 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
858
858
859 for fn, title in benches:
859 for fn, title in benches:
860 timer, fm = gettimer(ui, opts)
860 timer, fm = gettimer(ui, opts)
861 timer(fn, title=title)
861 timer(fn, title=title)
862 fm.end()
862 fm.end()
863
863
864 @command(b'perfchangegroupchangelog', formatteropts +
864 @command(b'perfchangegroupchangelog', formatteropts +
865 [(b'', b'cgversion', b'02', b'changegroup version'),
865 [(b'', b'cgversion', b'02', b'changegroup version'),
866 (b'r', b'rev', b'', b'revisions to add to changegroup')])
866 (b'r', b'rev', b'', b'revisions to add to changegroup')])
867 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
867 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
868 """Benchmark producing a changelog group for a changegroup.
868 """Benchmark producing a changelog group for a changegroup.
869
869
870 This measures the time spent processing the changelog during a
870 This measures the time spent processing the changelog during a
871 bundle operation. This occurs during `hg bundle` and on a server
871 bundle operation. This occurs during `hg bundle` and on a server
872 processing a `getbundle` wire protocol request (handles clones
872 processing a `getbundle` wire protocol request (handles clones
873 and pull requests).
873 and pull requests).
874
874
875 By default, all revisions are added to the changegroup.
875 By default, all revisions are added to the changegroup.
876 """
876 """
877 opts = _byteskwargs(opts)
877 opts = _byteskwargs(opts)
878 cl = repo.changelog
878 cl = repo.changelog
879 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
879 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
880 bundler = changegroup.getbundler(cgversion, repo)
880 bundler = changegroup.getbundler(cgversion, repo)
881
881
882 def d():
882 def d():
883 state, chunks = bundler._generatechangelog(cl, nodes)
883 state, chunks = bundler._generatechangelog(cl, nodes)
884 for chunk in chunks:
884 for chunk in chunks:
885 pass
885 pass
886
886
887 timer, fm = gettimer(ui, opts)
887 timer, fm = gettimer(ui, opts)
888
888
889 # Terminal printing can interfere with timing. So disable it.
889 # Terminal printing can interfere with timing. So disable it.
890 with ui.configoverride({(b'progress', b'disable'): True}):
890 with ui.configoverride({(b'progress', b'disable'): True}):
891 timer(d)
891 timer(d)
892
892
893 fm.end()
893 fm.end()
894
894
895 @command(b'perfdirs', formatteropts)
895 @command(b'perfdirs', formatteropts)
896 def perfdirs(ui, repo, **opts):
896 def perfdirs(ui, repo, **opts):
897 opts = _byteskwargs(opts)
897 opts = _byteskwargs(opts)
898 timer, fm = gettimer(ui, opts)
898 timer, fm = gettimer(ui, opts)
899 dirstate = repo.dirstate
899 dirstate = repo.dirstate
900 b'a' in dirstate
900 b'a' in dirstate
901 def d():
901 def d():
902 dirstate.hasdir(b'a')
902 dirstate.hasdir(b'a')
903 del dirstate._map._dirs
903 del dirstate._map._dirs
904 timer(d)
904 timer(d)
905 fm.end()
905 fm.end()
906
906
907 @command(b'perfdirstate', formatteropts)
907 @command(b'perfdirstate', formatteropts)
908 def perfdirstate(ui, repo, **opts):
908 def perfdirstate(ui, repo, **opts):
909 opts = _byteskwargs(opts)
909 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
910 timer, fm = gettimer(ui, opts)
911 b"a" in repo.dirstate
911 b"a" in repo.dirstate
912 def d():
912 def d():
913 repo.dirstate.invalidate()
913 repo.dirstate.invalidate()
914 b"a" in repo.dirstate
914 b"a" in repo.dirstate
915 timer(d)
915 timer(d)
916 fm.end()
916 fm.end()
917
917
918 @command(b'perfdirstatedirs', formatteropts)
918 @command(b'perfdirstatedirs', formatteropts)
919 def perfdirstatedirs(ui, repo, **opts):
919 def perfdirstatedirs(ui, repo, **opts):
920 opts = _byteskwargs(opts)
920 opts = _byteskwargs(opts)
921 timer, fm = gettimer(ui, opts)
921 timer, fm = gettimer(ui, opts)
922 b"a" in repo.dirstate
922 b"a" in repo.dirstate
923 def d():
923 def d():
924 repo.dirstate.hasdir(b"a")
924 repo.dirstate.hasdir(b"a")
925 del repo.dirstate._map._dirs
925 del repo.dirstate._map._dirs
926 timer(d)
926 timer(d)
927 fm.end()
927 fm.end()
928
928
929 @command(b'perfdirstatefoldmap', formatteropts)
929 @command(b'perfdirstatefoldmap', formatteropts)
930 def perfdirstatefoldmap(ui, repo, **opts):
930 def perfdirstatefoldmap(ui, repo, **opts):
931 opts = _byteskwargs(opts)
931 opts = _byteskwargs(opts)
932 timer, fm = gettimer(ui, opts)
932 timer, fm = gettimer(ui, opts)
933 dirstate = repo.dirstate
933 dirstate = repo.dirstate
934 b'a' in dirstate
934 b'a' in dirstate
935 def d():
935 def d():
936 dirstate._map.filefoldmap.get(b'a')
936 dirstate._map.filefoldmap.get(b'a')
937 del dirstate._map.filefoldmap
937 del dirstate._map.filefoldmap
938 timer(d)
938 timer(d)
939 fm.end()
939 fm.end()
940
940
941 @command(b'perfdirfoldmap', formatteropts)
941 @command(b'perfdirfoldmap', formatteropts)
942 def perfdirfoldmap(ui, repo, **opts):
942 def perfdirfoldmap(ui, repo, **opts):
943 opts = _byteskwargs(opts)
943 opts = _byteskwargs(opts)
944 timer, fm = gettimer(ui, opts)
944 timer, fm = gettimer(ui, opts)
945 dirstate = repo.dirstate
945 dirstate = repo.dirstate
946 b'a' in dirstate
946 b'a' in dirstate
947 def d():
947 def d():
948 dirstate._map.dirfoldmap.get(b'a')
948 dirstate._map.dirfoldmap.get(b'a')
949 del dirstate._map.dirfoldmap
949 del dirstate._map.dirfoldmap
950 del dirstate._map._dirs
950 del dirstate._map._dirs
951 timer(d)
951 timer(d)
952 fm.end()
952 fm.end()
953
953
954 @command(b'perfdirstatewrite', formatteropts)
954 @command(b'perfdirstatewrite', formatteropts)
955 def perfdirstatewrite(ui, repo, **opts):
955 def perfdirstatewrite(ui, repo, **opts):
956 opts = _byteskwargs(opts)
956 opts = _byteskwargs(opts)
957 timer, fm = gettimer(ui, opts)
957 timer, fm = gettimer(ui, opts)
958 ds = repo.dirstate
958 ds = repo.dirstate
959 b"a" in ds
959 b"a" in ds
960 def d():
960 def d():
961 ds._dirty = True
961 ds._dirty = True
962 ds.write(repo.currenttransaction())
962 ds.write(repo.currenttransaction())
963 timer(d)
963 timer(d)
964 fm.end()
964 fm.end()
965
965
966 def _getmergerevs(repo, opts):
966 def _getmergerevs(repo, opts):
967 """parse command argument to return rev involved in merge
967 """parse command argument to return rev involved in merge
968
968
969 input: options dictionary with `rev`, `from` and `base`
969 input: options dictionary with `rev`, `from` and `base`
970 output: (localctx, otherctx, basectx)
970 output: (localctx, otherctx, basectx)
971 """
971 """
972 if opts['from']:
972 if opts['from']:
973 fromrev = scmutil.revsingle(repo, opts['from'])
973 fromrev = scmutil.revsingle(repo, opts['from'])
974 wctx = repo[fromrev]
974 wctx = repo[fromrev]
975 else:
975 else:
976 wctx = repo[None]
976 wctx = repo[None]
977 # we don't want working dir files to be stat'd in the benchmark, so
977 # we don't want working dir files to be stat'd in the benchmark, so
978 # prime that cache
978 # prime that cache
979 wctx.dirty()
979 wctx.dirty()
980 rctx = scmutil.revsingle(repo, opts['rev'], opts['rev'])
980 rctx = scmutil.revsingle(repo, opts['rev'], opts['rev'])
981 if opts['base']:
981 if opts['base']:
982 fromrev = scmutil.revsingle(repo, opts['base'])
982 fromrev = scmutil.revsingle(repo, opts['base'])
983 ancestor = repo[fromrev]
983 ancestor = repo[fromrev]
984 else:
984 else:
985 ancestor = wctx.ancestor(rctx)
985 ancestor = wctx.ancestor(rctx)
986 return (wctx, rctx, ancestor)
986 return (wctx, rctx, ancestor)
987
987
988 @command(b'perfmergecalculate',
988 @command(b'perfmergecalculate',
989 [
989 [
990 (b'r', b'rev', b'.', b'rev to merge against'),
990 (b'r', b'rev', b'.', b'rev to merge against'),
991 (b'', b'from', b'', b'rev to merge from'),
991 (b'', b'from', b'', b'rev to merge from'),
992 (b'', b'base', b'', b'the revision to use as base'),
992 (b'', b'base', b'', b'the revision to use as base'),
993 ] + formatteropts)
993 ] + formatteropts)
994 def perfmergecalculate(ui, repo, **opts):
994 def perfmergecalculate(ui, repo, **opts):
995 opts = _byteskwargs(opts)
995 opts = _byteskwargs(opts)
996 timer, fm = gettimer(ui, opts)
996 timer, fm = gettimer(ui, opts)
997
997
998 wctx, rctx, ancestor = _getmergerevs(repo, opts)
998 wctx, rctx, ancestor = _getmergerevs(repo, opts)
999 def d():
999 def d():
1000 # acceptremote is True because we don't want prompts in the middle of
1000 # acceptremote is True because we don't want prompts in the middle of
1001 # our benchmark
1001 # our benchmark
1002 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1002 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1003 acceptremote=True, followcopies=True)
1003 acceptremote=True, followcopies=True)
1004 timer(d)
1004 timer(d)
1005 fm.end()
1005 fm.end()
1006
1006
1007 @command(b'perfmergecopies',
1008 [
1009 (b'r', b'rev', b'.', b'rev to merge against'),
1010 (b'', b'from', b'', b'rev to merge from'),
1011 (b'', b'base', b'', b'the revision to use as base'),
1012 ] + formatteropts)
1013 def perfmergecopies(ui, repo, **opts):
1014 """measure runtime of `copies.mergecopies`"""
1015 opts = _byteskwargs(opts)
1016 timer, fm = gettimer(ui, opts)
1017 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1018 def d():
1019 # this exercises the same copy tracing that a real merge would run,
1020 # which is the operation we want to measure
1021 copies.mergecopies(repo, wctx, rctx, ancestor)
1022 timer(d)
1023 fm.end()
1024
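# A minimal sketch of the call being measured, assuming a loaded `repo`.
# The revision choices are placeholders for what _getmergerevs() derives
# from --rev/--from/--base:
#
#     wctx = repo[None]            # working directory context
#     rctx = repo[b'tip']          # revision to merge against
#     base = wctx.ancestor(rctx)   # merge base
#     copies.mergecopies(repo, wctx, rctx, base)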
1007 @command(b'perfpathcopies', [], b"REV REV")
1025 @command(b'perfpathcopies', [], b"REV REV")
1008 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1026 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1009 """benchmark the copy tracing logic"""
1027 """benchmark the copy tracing logic"""
1010 opts = _byteskwargs(opts)
1028 opts = _byteskwargs(opts)
1011 timer, fm = gettimer(ui, opts)
1029 timer, fm = gettimer(ui, opts)
1012 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1030 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1013 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1031 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1014 def d():
1032 def d():
1015 copies.pathcopies(ctx1, ctx2)
1033 copies.pathcopies(ctx1, ctx2)
1016 timer(d)
1034 timer(d)
1017 fm.end()
1035 fm.end()
1018
1036
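# Sketch of the underlying call, assuming a loaded `repo`; b'REV1'/b'REV2'
# are placeholders for the two revisions passed on the command line:
#
#     ctx1 = scmutil.revsingle(repo, b'REV1')
#     ctx2 = scmutil.revsingle(repo, b'REV2')
#     renames = copies.pathcopies(ctx1, ctx2)   # detected copies and renames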
1019 @command(b'perfphases',
1037 @command(b'perfphases',
1020 [(b'', b'full', False, b'include file reading time too'),
1038 [(b'', b'full', False, b'include file reading time too'),
1021 ], b"")
1039 ], b"")
1022 def perfphases(ui, repo, **opts):
1040 def perfphases(ui, repo, **opts):
1023 """benchmark phasesets computation"""
1041 """benchmark phasesets computation"""
1024 opts = _byteskwargs(opts)
1042 opts = _byteskwargs(opts)
1025 timer, fm = gettimer(ui, opts)
1043 timer, fm = gettimer(ui, opts)
1026 _phases = repo._phasecache
1044 _phases = repo._phasecache
1027 full = opts.get(b'full')
1045 full = opts.get(b'full')
1028 def d():
1046 def d():
1029 phases = _phases
1047 phases = _phases
1030 if full:
1048 if full:
1031 clearfilecache(repo, b'_phasecache')
1049 clearfilecache(repo, b'_phasecache')
1032 phases = repo._phasecache
1050 phases = repo._phasecache
1033 phases.invalidate()
1051 phases.invalidate()
1034 phases.loadphaserevs(repo)
1052 phases.loadphaserevs(repo)
1035 timer(d)
1053 timer(d)
1036 fm.end()
1054 fm.end()
1037
1055
1038 @command(b'perfphasesremote',
1056 @command(b'perfphasesremote',
1039 [], b"[DEST]")
1057 [], b"[DEST]")
1040 def perfphasesremote(ui, repo, dest=None, **opts):
1058 def perfphasesremote(ui, repo, dest=None, **opts):
1041 """benchmark time needed to analyse phases of the remote server"""
1059 """benchmark time needed to analyse phases of the remote server"""
1042 from mercurial.node import (
1060 from mercurial.node import (
1043 bin,
1061 bin,
1044 )
1062 )
1045 from mercurial import (
1063 from mercurial import (
1046 exchange,
1064 exchange,
1047 hg,
1065 hg,
1048 phases,
1066 phases,
1049 )
1067 )
1050 opts = _byteskwargs(opts)
1068 opts = _byteskwargs(opts)
1051 timer, fm = gettimer(ui, opts)
1069 timer, fm = gettimer(ui, opts)
1052
1070
1053 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1071 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1054 if not path:
1072 if not path:
1055 raise error.Abort((b'default repository not configured!'),
1073 raise error.Abort((b'default repository not configured!'),
1056 hint=(b"see 'hg help config.paths'"))
1074 hint=(b"see 'hg help config.paths'"))
1057 dest = path.pushloc or path.loc
1075 dest = path.pushloc or path.loc
1058 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
1076 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
1059 other = hg.peer(repo, opts, dest)
1077 other = hg.peer(repo, opts, dest)
1060
1078
1061 # easier to perform discovery through the operation
1079 # easier to perform discovery through the operation
1062 op = exchange.pushoperation(repo, other)
1080 op = exchange.pushoperation(repo, other)
1063 exchange._pushdiscoverychangeset(op)
1081 exchange._pushdiscoverychangeset(op)
1064
1082
1065 remotesubset = op.fallbackheads
1083 remotesubset = op.fallbackheads
1066
1084
1067 with other.commandexecutor() as e:
1085 with other.commandexecutor() as e:
1068 remotephases = e.callcommand(b'listkeys',
1086 remotephases = e.callcommand(b'listkeys',
1069 {b'namespace': b'phases'}).result()
1087 {b'namespace': b'phases'}).result()
1070 del other
1088 del other
1071 publishing = remotephases.get(b'publishing', False)
1089 publishing = remotephases.get(b'publishing', False)
1072 if publishing:
1090 if publishing:
1073 ui.status((b'publishing: yes\n'))
1091 ui.status((b'publishing: yes\n'))
1074 else:
1092 else:
1075 ui.status((b'publishing: no\n'))
1093 ui.status((b'publishing: no\n'))
1076
1094
1077 nodemap = repo.changelog.nodemap
1095 nodemap = repo.changelog.nodemap
1078 nonpublishroots = 0
1096 nonpublishroots = 0
1079 for nhex, phase in remotephases.iteritems():
1097 for nhex, phase in remotephases.iteritems():
1080 if nhex == b'publishing': # ignore data related to publish option
1098 if nhex == b'publishing': # ignore data related to publish option
1081 continue
1099 continue
1082 node = bin(nhex)
1100 node = bin(nhex)
1083 if node in nodemap and int(phase):
1101 if node in nodemap and int(phase):
1084 nonpublishroots += 1
1102 nonpublishroots += 1
1085 ui.status((b'number of roots: %d\n') % len(remotephases))
1103 ui.status((b'number of roots: %d\n') % len(remotephases))
1086 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1104 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1087 def d():
1105 def d():
1088 phases.remotephasessummary(repo,
1106 phases.remotephasessummary(repo,
1089 remotesubset,
1107 remotesubset,
1090 remotephases)
1108 remotephases)
1091 timer(d)
1109 timer(d)
1092 fm.end()
1110 fm.end()
1093
1111
1094 @command(b'perfmanifest',[
1112 @command(b'perfmanifest',[
1095 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1113 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1096 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1114 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1097 ] + formatteropts, b'REV|NODE')
1115 ] + formatteropts, b'REV|NODE')
1098 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1116 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1099 """benchmark the time to read a manifest from disk and return a usable
1117 """benchmark the time to read a manifest from disk and return a usable
1100 dict-like object
1118 dict-like object
1101
1119
1102 Manifest caches are cleared before retrieval."""
1120 Manifest caches are cleared before retrieval."""
1103 opts = _byteskwargs(opts)
1121 opts = _byteskwargs(opts)
1104 timer, fm = gettimer(ui, opts)
1122 timer, fm = gettimer(ui, opts)
1105 if not manifest_rev:
1123 if not manifest_rev:
1106 ctx = scmutil.revsingle(repo, rev, rev)
1124 ctx = scmutil.revsingle(repo, rev, rev)
1107 t = ctx.manifestnode()
1125 t = ctx.manifestnode()
1108 else:
1126 else:
1109 from mercurial.node import bin
1127 from mercurial.node import bin
1110
1128
1111 if len(rev) == 40:
1129 if len(rev) == 40:
1112 t = bin(rev)
1130 t = bin(rev)
1113 else:
1131 else:
1114 try:
1132 try:
1115 rev = int(rev)
1133 rev = int(rev)
1116
1134
1117 if util.safehasattr(repo.manifestlog, b'getstorage'):
1135 if util.safehasattr(repo.manifestlog, b'getstorage'):
1118 t = repo.manifestlog.getstorage(b'').node(rev)
1136 t = repo.manifestlog.getstorage(b'').node(rev)
1119 else:
1137 else:
1120 t = repo.manifestlog._revlog.lookup(rev)
1138 t = repo.manifestlog._revlog.lookup(rev)
1121 except ValueError:
1139 except ValueError:
1122 raise error.Abort(b'manifest revision must be integer or full '
1140 raise error.Abort(b'manifest revision must be integer or full '
1123 b'node')
1141 b'node')
1124 def d():
1142 def d():
1125 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1143 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1126 repo.manifestlog[t].read()
1144 repo.manifestlog[t].read()
1127 timer(d)
1145 timer(d)
1128 fm.end()
1146 fm.end()
1129
1147
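# What a single benchmark iteration does, sketched for reference (assumes a
# loaded `repo`; the file name below is a placeholder):
#
#     ctx = scmutil.revsingle(repo, b'tip')
#     repo.manifestlog.clearcaches(clear_persisted_data=False)
#     mf = repo.manifestlog[ctx.manifestnode()].read()
#     b'path/to/file' in mf   # the result is the usable dict-like object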
1130 @command(b'perfchangeset', formatteropts)
1148 @command(b'perfchangeset', formatteropts)
1131 def perfchangeset(ui, repo, rev, **opts):
1149 def perfchangeset(ui, repo, rev, **opts):
1132 opts = _byteskwargs(opts)
1150 opts = _byteskwargs(opts)
1133 timer, fm = gettimer(ui, opts)
1151 timer, fm = gettimer(ui, opts)
1134 n = scmutil.revsingle(repo, rev).node()
1152 n = scmutil.revsingle(repo, rev).node()
1135 def d():
1153 def d():
1136 repo.changelog.read(n)
1154 repo.changelog.read(n)
1137 #repo.changelog._cache = None
1155 #repo.changelog._cache = None
1138 timer(d)
1156 timer(d)
1139 fm.end()
1157 fm.end()
1140
1158
1141 @command(b'perfignore', formatteropts)
1159 @command(b'perfignore', formatteropts)
1142 def perfignore(ui, repo, **opts):
1160 def perfignore(ui, repo, **opts):
1143 """benchmark operation related to computing ignore"""
1161 """benchmark operation related to computing ignore"""
1144 opts = _byteskwargs(opts)
1162 opts = _byteskwargs(opts)
1145 timer, fm = gettimer(ui, opts)
1163 timer, fm = gettimer(ui, opts)
1146 dirstate = repo.dirstate
1164 dirstate = repo.dirstate
1147
1165
1148 def setupone():
1166 def setupone():
1149 dirstate.invalidate()
1167 dirstate.invalidate()
1150 clearfilecache(dirstate, b'_ignore')
1168 clearfilecache(dirstate, b'_ignore')
1151
1169
1152 def runone():
1170 def runone():
1153 dirstate._ignore
1171 dirstate._ignore
1154
1172
1155 timer(runone, setup=setupone, title=b"load")
1173 timer(runone, setup=setupone, title=b"load")
1156 fm.end()
1174 fm.end()
1157
1175
1158 @command(b'perfindex', [
1176 @command(b'perfindex', [
1159 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1177 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1160 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1178 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1161 ] + formatteropts)
1179 ] + formatteropts)
1162 def perfindex(ui, repo, **opts):
1180 def perfindex(ui, repo, **opts):
1163 """benchmark index creation time followed by a lookup
1181 """benchmark index creation time followed by a lookup
1164
1182
1165 The default is to look `tip` up. Depending on the index implementation,
1183 The default is to look `tip` up. Depending on the index implementation,
1166 the revision looked up can matter. For example, an implementation
1184 the revision looked up can matter. For example, an implementation
1167 scanning the index will have a faster lookup time for `--rev tip` than for
1185 scanning the index will have a faster lookup time for `--rev tip` than for
1168 `--rev 0`. The number of looked up revisions and their order can also
1186 `--rev 0`. The number of looked up revisions and their order can also
1169 matter.
1187 matter.
1170
1188
1171 Examples of useful sets to test:
1189 Examples of useful sets to test:
1172 * tip
1190 * tip
1173 * 0
1191 * 0
1174 * -10:
1192 * -10:
1175 * :10
1193 * :10
1176 * -10: + :10
1194 * -10: + :10
1177 * :10: + -10:
1195 * :10: + -10:
1178 * -10000:
1196 * -10000:
1179 * -10000: + 0
1197 * -10000: + 0
1180
1198
1181 It is not currently possible to check for lookup of a missing node. For
1199 It is not currently possible to check for lookup of a missing node. For
1182 deeper lookup benchmarking, check out the `perfnodemap` command."""
1200 deeper lookup benchmarking, check out the `perfnodemap` command."""
1183 import mercurial.revlog
1201 import mercurial.revlog
1184 opts = _byteskwargs(opts)
1202 opts = _byteskwargs(opts)
1185 timer, fm = gettimer(ui, opts)
1203 timer, fm = gettimer(ui, opts)
1186 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1204 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1187 if opts[b'no_lookup']:
1205 if opts[b'no_lookup']:
1188 if opts['rev']:
1206 if opts['rev']:
1189 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1207 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1190 nodes = []
1208 nodes = []
1191 elif not opts[b'rev']:
1209 elif not opts[b'rev']:
1192 nodes = [repo[b"tip"].node()]
1210 nodes = [repo[b"tip"].node()]
1193 else:
1211 else:
1194 revs = scmutil.revrange(repo, opts[b'rev'])
1212 revs = scmutil.revrange(repo, opts[b'rev'])
1195 cl = repo.changelog
1213 cl = repo.changelog
1196 nodes = [cl.node(r) for r in revs]
1214 nodes = [cl.node(r) for r in revs]
1197
1215
1198 unfi = repo.unfiltered()
1216 unfi = repo.unfiltered()
1199 # find the filecache func directly
1217 # find the filecache func directly
1200 # This avoids polluting the benchmark with the filecache logic
1218 # This avoids polluting the benchmark with the filecache logic
1201 makecl = unfi.__class__.changelog.func
1219 makecl = unfi.__class__.changelog.func
1202 def setup():
1220 def setup():
1203 # probably not necessary, but for good measure
1221 # probably not necessary, but for good measure
1204 clearchangelog(unfi)
1222 clearchangelog(unfi)
1205 def d():
1223 def d():
1206 cl = makecl(unfi)
1224 cl = makecl(unfi)
1207 for n in nodes:
1225 for n in nodes:
1208 cl.rev(n)
1226 cl.rev(n)
1209 timer(d, setup=setup)
1227 timer(d, setup=setup)
1210 fm.end()
1228 fm.end()
1211
1229
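# The lookup performed after index creation, sketched (assumes a loaded
# `repo`):
#
#     cl = repo.unfiltered().changelog
#     node = cl.node(len(cl) - 1)   # binary node id of tip
#     cl.rev(node)                  # node -> revision number via the index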
1212 @command(b'perfnodemap', [
1230 @command(b'perfnodemap', [
1213 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1231 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1214 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1232 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1215 ] + formatteropts)
1233 ] + formatteropts)
1216 def perfnodemap(ui, repo, **opts):
1234 def perfnodemap(ui, repo, **opts):
1217 """benchmark the time necessary to look up revision from a cold nodemap
1235 """benchmark the time necessary to look up revision from a cold nodemap
1218
1236
1219 Depending on the implementation, the number and order of revisions we look
1237 Depending on the implementation, the number and order of revisions we look
1220 up can vary. Examples of useful sets to test:
1238 up can vary. Examples of useful sets to test:
1221 * tip
1239 * tip
1222 * 0
1240 * 0
1223 * -10:
1241 * -10:
1224 * :10
1242 * :10
1225 * -10: + :10
1243 * -10: + :10
1226 * :10: + -10:
1244 * :10: + -10:
1227 * -10000:
1245 * -10000:
1228 * -10000: + 0
1246 * -10000: + 0
1229
1247
1230 The command currently focuses on valid binary lookups. Benchmarking for
1248 The command currently focuses on valid binary lookups. Benchmarking for
1231 hexlookup, prefix lookup and missing lookup would also be valuable.
1249 hexlookup, prefix lookup and missing lookup would also be valuable.
1232 """
1250 """
1233 import mercurial.revlog
1251 import mercurial.revlog
1234 opts = _byteskwargs(opts)
1252 opts = _byteskwargs(opts)
1235 timer, fm = gettimer(ui, opts)
1253 timer, fm = gettimer(ui, opts)
1236 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1254 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1237
1255
1238 unfi = repo.unfiltered()
1256 unfi = repo.unfiltered()
1239 clearcaches = opts['clear_caches']
1257 clearcaches = opts['clear_caches']
1240 # find the filecache func directly
1258 # find the filecache func directly
1241 # This avoids polluting the benchmark with the filecache logic
1259 # This avoids polluting the benchmark with the filecache logic
1242 makecl = unfi.__class__.changelog.func
1260 makecl = unfi.__class__.changelog.func
1243 if not opts[b'rev']:
1261 if not opts[b'rev']:
1244 raise error.Abort('use --rev to specify revisions to look up')
1262 raise error.Abort('use --rev to specify revisions to look up')
1245 revs = scmutil.revrange(repo, opts[b'rev'])
1263 revs = scmutil.revrange(repo, opts[b'rev'])
1246 cl = repo.changelog
1264 cl = repo.changelog
1247 nodes = [cl.node(r) for r in revs]
1265 nodes = [cl.node(r) for r in revs]
1248
1266
1249 # use a list to pass reference to a nodemap from one closure to the next
1267 # use a list to pass reference to a nodemap from one closure to the next
1250 nodeget = [None]
1268 nodeget = [None]
1251 def setnodeget():
1269 def setnodeget():
1252 # probably not necessary, but for good measure
1270 # probably not necessary, but for good measure
1253 clearchangelog(unfi)
1271 clearchangelog(unfi)
1254 nodeget[0] = makecl(unfi).nodemap.get
1272 nodeget[0] = makecl(unfi).nodemap.get
1255
1273
1256 def d():
1274 def d():
1257 get = nodeget[0]
1275 get = nodeget[0]
1258 for n in nodes:
1276 for n in nodes:
1259 get(n)
1277 get(n)
1260
1278
1261 setup = None
1279 setup = None
1262 if clearcaches:
1280 if clearcaches:
1263 def setup():
1281 def setup():
1264 setnodeget()
1282 setnodeget()
1265 else:
1283 else:
1266 setnodeget()
1284 setnodeget()
1267 d() # prewarm the data structure
1285 d() # prewarm the data structure
1268 timer(d, setup=setup)
1286 timer(d, setup=setup)
1269 fm.end()
1287 fm.end()
1270
1288
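# The nodemap lookup being timed, sketched (assumes a loaded `repo`):
#
#     cl = repo.unfiltered().changelog
#     get = cl.nodemap.get
#     get(cl.node(0))   # binary node id -> revision number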
1271 @command(b'perfstartup', formatteropts)
1289 @command(b'perfstartup', formatteropts)
1272 def perfstartup(ui, repo, **opts):
1290 def perfstartup(ui, repo, **opts):
1273 opts = _byteskwargs(opts)
1291 opts = _byteskwargs(opts)
1274 timer, fm = gettimer(ui, opts)
1292 timer, fm = gettimer(ui, opts)
1275 def d():
1293 def d():
1276 if os.name != r'nt':
1294 if os.name != r'nt':
1277 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1295 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1278 fsencode(sys.argv[0]))
1296 fsencode(sys.argv[0]))
1279 else:
1297 else:
1280 os.environ[r'HGRCPATH'] = r' '
1298 os.environ[r'HGRCPATH'] = r' '
1281 os.system(r"%s version -q > NUL" % sys.argv[0])
1299 os.system(r"%s version -q > NUL" % sys.argv[0])
1282 timer(d)
1300 timer(d)
1283 fm.end()
1301 fm.end()
1284
1302
1285 @command(b'perfparents', formatteropts)
1303 @command(b'perfparents', formatteropts)
1286 def perfparents(ui, repo, **opts):
1304 def perfparents(ui, repo, **opts):
1287 """benchmark the time necessary to fetch one changeset's parents.
1305 """benchmark the time necessary to fetch one changeset's parents.
1288
1306
1289 The fetch is done using the `node identifier`, traversing all object layers
1307 The fetch is done using the `node identifier`, traversing all object layers
1290 from the repository object. The first N revisions will be used for this
1308 from the repository object. The first N revisions will be used for this
1291 benchmark. N is controlled by the ``perf.parentscount`` config option
1309 benchmark. N is controlled by the ``perf.parentscount`` config option
1292 (default: 1000).
1310 (default: 1000).
1293 """
1311 """
1294 opts = _byteskwargs(opts)
1312 opts = _byteskwargs(opts)
1295 timer, fm = gettimer(ui, opts)
1313 timer, fm = gettimer(ui, opts)
1296 # control the number of commits perfparents iterates over
1314 # control the number of commits perfparents iterates over
1297 # experimental config: perf.parentscount
1315 # experimental config: perf.parentscount
1298 count = getint(ui, b"perf", b"parentscount", 1000)
1316 count = getint(ui, b"perf", b"parentscount", 1000)
1299 if len(repo.changelog) < count:
1317 if len(repo.changelog) < count:
1300 raise error.Abort(b"repo needs %d commits for this test" % count)
1318 raise error.Abort(b"repo needs %d commits for this test" % count)
1301 repo = repo.unfiltered()
1319 repo = repo.unfiltered()
1302 nl = [repo.changelog.node(i) for i in _xrange(count)]
1320 nl = [repo.changelog.node(i) for i in _xrange(count)]
1303 def d():
1321 def d():
1304 for n in nl:
1322 for n in nl:
1305 repo.changelog.parents(n)
1323 repo.changelog.parents(n)
1306 timer(d)
1324 timer(d)
1307 fm.end()
1325 fm.end()
1308
1326
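# The per-changeset operation being timed, sketched (assumes a loaded
# `repo`):
#
#     cl = repo.unfiltered().changelog
#     node = cl.node(0)
#     p1, p2 = cl.parents(node)   # parents returned as binary node ids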
1309 @command(b'perfctxfiles', formatteropts)
1327 @command(b'perfctxfiles', formatteropts)
1310 def perfctxfiles(ui, repo, x, **opts):
1328 def perfctxfiles(ui, repo, x, **opts):
1311 opts = _byteskwargs(opts)
1329 opts = _byteskwargs(opts)
1312 x = int(x)
1330 x = int(x)
1313 timer, fm = gettimer(ui, opts)
1331 timer, fm = gettimer(ui, opts)
1314 def d():
1332 def d():
1315 len(repo[x].files())
1333 len(repo[x].files())
1316 timer(d)
1334 timer(d)
1317 fm.end()
1335 fm.end()
1318
1336
1319 @command(b'perfrawfiles', formatteropts)
1337 @command(b'perfrawfiles', formatteropts)
1320 def perfrawfiles(ui, repo, x, **opts):
1338 def perfrawfiles(ui, repo, x, **opts):
1321 opts = _byteskwargs(opts)
1339 opts = _byteskwargs(opts)
1322 x = int(x)
1340 x = int(x)
1323 timer, fm = gettimer(ui, opts)
1341 timer, fm = gettimer(ui, opts)
1324 cl = repo.changelog
1342 cl = repo.changelog
1325 def d():
1343 def d():
1326 len(cl.read(x)[3])
1344 len(cl.read(x)[3])
1327 timer(d)
1345 timer(d)
1328 fm.end()
1346 fm.end()
1329
1347
1330 @command(b'perflookup', formatteropts)
1348 @command(b'perflookup', formatteropts)
1331 def perflookup(ui, repo, rev, **opts):
1349 def perflookup(ui, repo, rev, **opts):
1332 opts = _byteskwargs(opts)
1350 opts = _byteskwargs(opts)
1333 timer, fm = gettimer(ui, opts)
1351 timer, fm = gettimer(ui, opts)
1334 timer(lambda: len(repo.lookup(rev)))
1352 timer(lambda: len(repo.lookup(rev)))
1335 fm.end()
1353 fm.end()
1336
1354
1337 @command(b'perflinelogedits',
1355 @command(b'perflinelogedits',
1338 [(b'n', b'edits', 10000, b'number of edits'),
1356 [(b'n', b'edits', 10000, b'number of edits'),
1339 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1357 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1340 ], norepo=True)
1358 ], norepo=True)
1341 def perflinelogedits(ui, **opts):
1359 def perflinelogedits(ui, **opts):
1342 from mercurial import linelog
1360 from mercurial import linelog
1343
1361
1344 opts = _byteskwargs(opts)
1362 opts = _byteskwargs(opts)
1345
1363
1346 edits = opts[b'edits']
1364 edits = opts[b'edits']
1347 maxhunklines = opts[b'max_hunk_lines']
1365 maxhunklines = opts[b'max_hunk_lines']
1348
1366
1349 maxb1 = 100000
1367 maxb1 = 100000
1350 random.seed(0)
1368 random.seed(0)
1351 randint = random.randint
1369 randint = random.randint
1352 currentlines = 0
1370 currentlines = 0
1353 arglist = []
1371 arglist = []
1354 for rev in _xrange(edits):
1372 for rev in _xrange(edits):
1355 a1 = randint(0, currentlines)
1373 a1 = randint(0, currentlines)
1356 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1374 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1357 b1 = randint(0, maxb1)
1375 b1 = randint(0, maxb1)
1358 b2 = randint(b1, b1 + maxhunklines)
1376 b2 = randint(b1, b1 + maxhunklines)
1359 currentlines += (b2 - b1) - (a2 - a1)
1377 currentlines += (b2 - b1) - (a2 - a1)
1360 arglist.append((rev, a1, a2, b1, b2))
1378 arglist.append((rev, a1, a2, b1, b2))
1361
1379
1362 def d():
1380 def d():
1363 ll = linelog.linelog()
1381 ll = linelog.linelog()
1364 for args in arglist:
1382 for args in arglist:
1365 ll.replacelines(*args)
1383 ll.replacelines(*args)
1366
1384
1367 timer, fm = gettimer(ui, opts)
1385 timer, fm = gettimer(ui, opts)
1368 timer(d)
1386 timer(d)
1369 fm.end()
1387 fm.end()
1370
1388
1371 @command(b'perfrevrange', formatteropts)
1389 @command(b'perfrevrange', formatteropts)
1372 def perfrevrange(ui, repo, *specs, **opts):
1390 def perfrevrange(ui, repo, *specs, **opts):
1373 opts = _byteskwargs(opts)
1391 opts = _byteskwargs(opts)
1374 timer, fm = gettimer(ui, opts)
1392 timer, fm = gettimer(ui, opts)
1375 revrange = scmutil.revrange
1393 revrange = scmutil.revrange
1376 timer(lambda: len(revrange(repo, specs)))
1394 timer(lambda: len(revrange(repo, specs)))
1377 fm.end()
1395 fm.end()
1378
1396
1379 @command(b'perfnodelookup', formatteropts)
1397 @command(b'perfnodelookup', formatteropts)
1380 def perfnodelookup(ui, repo, rev, **opts):
1398 def perfnodelookup(ui, repo, rev, **opts):
1381 opts = _byteskwargs(opts)
1399 opts = _byteskwargs(opts)
1382 timer, fm = gettimer(ui, opts)
1400 timer, fm = gettimer(ui, opts)
1383 import mercurial.revlog
1401 import mercurial.revlog
1384 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1402 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1385 n = scmutil.revsingle(repo, rev).node()
1403 n = scmutil.revsingle(repo, rev).node()
1386 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1404 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1387 def d():
1405 def d():
1388 cl.rev(n)
1406 cl.rev(n)
1389 clearcaches(cl)
1407 clearcaches(cl)
1390 timer(d)
1408 timer(d)
1391 fm.end()
1409 fm.end()
1392
1410
1393 @command(b'perflog',
1411 @command(b'perflog',
1394 [(b'', b'rename', False, b'ask log to follow renames')
1412 [(b'', b'rename', False, b'ask log to follow renames')
1395 ] + formatteropts)
1413 ] + formatteropts)
1396 def perflog(ui, repo, rev=None, **opts):
1414 def perflog(ui, repo, rev=None, **opts):
1397 opts = _byteskwargs(opts)
1415 opts = _byteskwargs(opts)
1398 if rev is None:
1416 if rev is None:
1399 rev=[]
1417 rev=[]
1400 timer, fm = gettimer(ui, opts)
1418 timer, fm = gettimer(ui, opts)
1401 ui.pushbuffer()
1419 ui.pushbuffer()
1402 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1420 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1403 copies=opts.get(b'rename')))
1421 copies=opts.get(b'rename')))
1404 ui.popbuffer()
1422 ui.popbuffer()
1405 fm.end()
1423 fm.end()
1406
1424
1407 @command(b'perfmoonwalk', formatteropts)
1425 @command(b'perfmoonwalk', formatteropts)
1408 def perfmoonwalk(ui, repo, **opts):
1426 def perfmoonwalk(ui, repo, **opts):
1409 """benchmark walking the changelog backwards
1427 """benchmark walking the changelog backwards
1410
1428
1411 This also loads the changelog data for each revision in the changelog.
1429 This also loads the changelog data for each revision in the changelog.
1412 """
1430 """
1413 opts = _byteskwargs(opts)
1431 opts = _byteskwargs(opts)
1414 timer, fm = gettimer(ui, opts)
1432 timer, fm = gettimer(ui, opts)
1415 def moonwalk():
1433 def moonwalk():
1416 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1434 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1417 ctx = repo[i]
1435 ctx = repo[i]
1418 ctx.branch() # read changelog data (in addition to the index)
1436 ctx.branch() # read changelog data (in addition to the index)
1419 timer(moonwalk)
1437 timer(moonwalk)
1420 fm.end()
1438 fm.end()
1421
1439
1422 @command(b'perftemplating',
1440 @command(b'perftemplating',
1423 [(b'r', b'rev', [], b'revisions to run the template on'),
1441 [(b'r', b'rev', [], b'revisions to run the template on'),
1424 ] + formatteropts)
1442 ] + formatteropts)
1425 def perftemplating(ui, repo, testedtemplate=None, **opts):
1443 def perftemplating(ui, repo, testedtemplate=None, **opts):
1426 """test the rendering time of a given template"""
1444 """test the rendering time of a given template"""
1427 if makelogtemplater is None:
1445 if makelogtemplater is None:
1428 raise error.Abort((b"perftemplating not available with this Mercurial"),
1446 raise error.Abort((b"perftemplating not available with this Mercurial"),
1429 hint=b"use 4.3 or later")
1447 hint=b"use 4.3 or later")
1430
1448
1431 opts = _byteskwargs(opts)
1449 opts = _byteskwargs(opts)
1432
1450
1433 nullui = ui.copy()
1451 nullui = ui.copy()
1434 nullui.fout = open(os.devnull, r'wb')
1452 nullui.fout = open(os.devnull, r'wb')
1435 nullui.disablepager()
1453 nullui.disablepager()
1436 revs = opts.get(b'rev')
1454 revs = opts.get(b'rev')
1437 if not revs:
1455 if not revs:
1438 revs = [b'all()']
1456 revs = [b'all()']
1439 revs = list(scmutil.revrange(repo, revs))
1457 revs = list(scmutil.revrange(repo, revs))
1440
1458
1441 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1459 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1442 b' {author|person}: {desc|firstline}\n')
1460 b' {author|person}: {desc|firstline}\n')
1443 if testedtemplate is None:
1461 if testedtemplate is None:
1444 testedtemplate = defaulttemplate
1462 testedtemplate = defaulttemplate
1445 displayer = makelogtemplater(nullui, repo, testedtemplate)
1463 displayer = makelogtemplater(nullui, repo, testedtemplate)
1446 def format():
1464 def format():
1447 for r in revs:
1465 for r in revs:
1448 ctx = repo[r]
1466 ctx = repo[r]
1449 displayer.show(ctx)
1467 displayer.show(ctx)
1450 displayer.flush(ctx)
1468 displayer.flush(ctx)
1451
1469
1452 timer, fm = gettimer(ui, opts)
1470 timer, fm = gettimer(ui, opts)
1453 timer(format)
1471 timer(format)
1454 fm.end()
1472 fm.end()
1455
1473
1456 @command(b'perfhelper-pathcopies', formatteropts +
1474 @command(b'perfhelper-pathcopies', formatteropts +
1457 [
1475 [
1458 (b'r', b'revs', [], b'restrict search to these revisions'),
1476 (b'r', b'revs', [], b'restrict search to these revisions'),
1459 (b'', b'timing', False, b'provides extra data (costly)'),
1477 (b'', b'timing', False, b'provides extra data (costly)'),
1460 ])
1478 ])
1461 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1479 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1462 """find statistic about potential parameters for the `perftracecopies`
1480 """find statistic about potential parameters for the `perftracecopies`
1463
1481
1464 This command finds source-destination pairs relevant for copy tracing testing.
1482 This command finds source-destination pairs relevant for copy tracing testing.
1465 It reports values for some of the parameters that impact copy tracing time.
1483 It reports values for some of the parameters that impact copy tracing time.
1466
1484
1467 If `--timing` is set, rename detection is run and the associated timing
1485 If `--timing` is set, rename detection is run and the associated timing
1468 will be reported. The extra details come at the cost of a slower command
1486 will be reported. The extra details come at the cost of a slower command
1469 execution.
1487 execution.
1470
1488
1471 Since the rename detection is only run once, other factors might easily
1489 Since the rename detection is only run once, other factors might easily
1472 affect the precision of the timing. However, it should give a good
1490 affect the precision of the timing. However, it should give a good
1473 approximation of which revision pairs are very costly.
1491 approximation of which revision pairs are very costly.
1474 """
1492 """
1475 opts = _byteskwargs(opts)
1493 opts = _byteskwargs(opts)
1476 fm = ui.formatter(b'perf', opts)
1494 fm = ui.formatter(b'perf', opts)
1477 dotiming = opts[b'timing']
1495 dotiming = opts[b'timing']
1478
1496
1479 if dotiming:
1497 if dotiming:
1480 header = '%12s %12s %12s %12s %12s %12s\n'
1498 header = '%12s %12s %12s %12s %12s %12s\n'
1481 output = ("%(source)12s %(destination)12s "
1499 output = ("%(source)12s %(destination)12s "
1482 "%(nbrevs)12d %(nbmissingfiles)12d "
1500 "%(nbrevs)12d %(nbmissingfiles)12d "
1483 "%(nbrenamedfiles)12d %(time)18.5f\n")
1501 "%(nbrenamedfiles)12d %(time)18.5f\n")
1484 header_names = ("source", "destination", "nb-revs", "nb-files",
1502 header_names = ("source", "destination", "nb-revs", "nb-files",
1485 "nb-renames", "time")
1503 "nb-renames", "time")
1486 fm.plain(header % header_names)
1504 fm.plain(header % header_names)
1487 else:
1505 else:
1488 header = '%12s %12s %12s %12s\n'
1506 header = '%12s %12s %12s %12s\n'
1489 output = ("%(source)12s %(destination)12s "
1507 output = ("%(source)12s %(destination)12s "
1490 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1508 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1491 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1509 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1492
1510
1493 if not revs:
1511 if not revs:
1494 revs = ['all()']
1512 revs = ['all()']
1495 revs = scmutil.revrange(repo, revs)
1513 revs = scmutil.revrange(repo, revs)
1496
1514
1497 roi = repo.revs('merge() and %ld', revs)
1515 roi = repo.revs('merge() and %ld', revs)
1498 for r in roi:
1516 for r in roi:
1499 ctx = repo[r]
1517 ctx = repo[r]
1500 p1 = ctx.p1().rev()
1518 p1 = ctx.p1().rev()
1501 p2 = ctx.p2().rev()
1519 p2 = ctx.p2().rev()
1502 bases = repo.changelog._commonancestorsheads(p1, p2)
1520 bases = repo.changelog._commonancestorsheads(p1, p2)
1503 for p in (p1, p2):
1521 for p in (p1, p2):
1504 for b in bases:
1522 for b in bases:
1505 base = repo[b]
1523 base = repo[b]
1506 parent = repo[p]
1524 parent = repo[p]
1507 missing = copies._computeforwardmissing(base, parent)
1525 missing = copies._computeforwardmissing(base, parent)
1508 if not missing:
1526 if not missing:
1509 continue
1527 continue
1510 data = {
1528 data = {
1511 b'source': base.hex(),
1529 b'source': base.hex(),
1512 b'destination': parent.hex(),
1530 b'destination': parent.hex(),
1513 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1531 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1514 b'nbmissingfiles': len(missing),
1532 b'nbmissingfiles': len(missing),
1515 }
1533 }
1516 if dotiming:
1534 if dotiming:
1517 begin = util.timer()
1535 begin = util.timer()
1518 renames = copies.pathcopies(base, parent)
1536 renames = copies.pathcopies(base, parent)
1519 end = util.timer()
1537 end = util.timer()
1520 # not very stable timing since we did only one run
1538 # not very stable timing since we did only one run
1521 data['time'] = end - begin
1539 data['time'] = end - begin
1522 data['nbrenamedfiles'] = len(renames)
1540 data['nbrenamedfiles'] = len(renames)
1523 fm.startitem()
1541 fm.startitem()
1524 fm.data(**data)
1542 fm.data(**data)
1525 out = data.copy()
1543 out = data.copy()
1526 out['source'] = fm.hexfunc(base.node())
1544 out['source'] = fm.hexfunc(base.node())
1527 out['destination'] = fm.hexfunc(parent.node())
1545 out['destination'] = fm.hexfunc(parent.node())
1528 fm.plain(output % out)
1546 fm.plain(output % out)
1529
1547
1530 fm.end()
1548 fm.end()
1531
1549
1532 @command(b'perfcca', formatteropts)
1550 @command(b'perfcca', formatteropts)
1533 def perfcca(ui, repo, **opts):
1551 def perfcca(ui, repo, **opts):
1534 opts = _byteskwargs(opts)
1552 opts = _byteskwargs(opts)
1535 timer, fm = gettimer(ui, opts)
1553 timer, fm = gettimer(ui, opts)
1536 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1554 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1537 fm.end()
1555 fm.end()
1538
1556
1539 @command(b'perffncacheload', formatteropts)
1557 @command(b'perffncacheload', formatteropts)
1540 def perffncacheload(ui, repo, **opts):
1558 def perffncacheload(ui, repo, **opts):
1541 opts = _byteskwargs(opts)
1559 opts = _byteskwargs(opts)
1542 timer, fm = gettimer(ui, opts)
1560 timer, fm = gettimer(ui, opts)
1543 s = repo.store
1561 s = repo.store
1544 def d():
1562 def d():
1545 s.fncache._load()
1563 s.fncache._load()
1546 timer(d)
1564 timer(d)
1547 fm.end()
1565 fm.end()
1548
1566
1549 @command(b'perffncachewrite', formatteropts)
1567 @command(b'perffncachewrite', formatteropts)
1550 def perffncachewrite(ui, repo, **opts):
1568 def perffncachewrite(ui, repo, **opts):
1551 opts = _byteskwargs(opts)
1569 opts = _byteskwargs(opts)
1552 timer, fm = gettimer(ui, opts)
1570 timer, fm = gettimer(ui, opts)
1553 s = repo.store
1571 s = repo.store
1554 lock = repo.lock()
1572 lock = repo.lock()
1555 s.fncache._load()
1573 s.fncache._load()
1556 tr = repo.transaction(b'perffncachewrite')
1574 tr = repo.transaction(b'perffncachewrite')
1557 tr.addbackup(b'fncache')
1575 tr.addbackup(b'fncache')
1558 def d():
1576 def d():
1559 s.fncache._dirty = True
1577 s.fncache._dirty = True
1560 s.fncache.write(tr)
1578 s.fncache.write(tr)
1561 timer(d)
1579 timer(d)
1562 tr.close()
1580 tr.close()
1563 lock.release()
1581 lock.release()
1564 fm.end()
1582 fm.end()
1565
1583
1566 @command(b'perffncacheencode', formatteropts)
1584 @command(b'perffncacheencode', formatteropts)
1567 def perffncacheencode(ui, repo, **opts):
1585 def perffncacheencode(ui, repo, **opts):
1568 opts = _byteskwargs(opts)
1586 opts = _byteskwargs(opts)
1569 timer, fm = gettimer(ui, opts)
1587 timer, fm = gettimer(ui, opts)
1570 s = repo.store
1588 s = repo.store
1571 s.fncache._load()
1589 s.fncache._load()
1572 def d():
1590 def d():
1573 for p in s.fncache.entries:
1591 for p in s.fncache.entries:
1574 s.encode(p)
1592 s.encode(p)
1575 timer(d)
1593 timer(d)
1576 fm.end()
1594 fm.end()
1577
1595
1578 def _bdiffworker(q, blocks, xdiff, ready, done):
1596 def _bdiffworker(q, blocks, xdiff, ready, done):
1579 while not done.is_set():
1597 while not done.is_set():
1580 pair = q.get()
1598 pair = q.get()
1581 while pair is not None:
1599 while pair is not None:
1582 if xdiff:
1600 if xdiff:
1583 mdiff.bdiff.xdiffblocks(*pair)
1601 mdiff.bdiff.xdiffblocks(*pair)
1584 elif blocks:
1602 elif blocks:
1585 mdiff.bdiff.blocks(*pair)
1603 mdiff.bdiff.blocks(*pair)
1586 else:
1604 else:
1587 mdiff.textdiff(*pair)
1605 mdiff.textdiff(*pair)
1588 q.task_done()
1606 q.task_done()
1589 pair = q.get()
1607 pair = q.get()
1590 q.task_done() # for the None one
1608 q.task_done() # for the None one
1591 with ready:
1609 with ready:
1592 ready.wait()
1610 ready.wait()
1593
1611
1594 def _manifestrevision(repo, mnode):
1612 def _manifestrevision(repo, mnode):
1595 ml = repo.manifestlog
1613 ml = repo.manifestlog
1596
1614
1597 if util.safehasattr(ml, b'getstorage'):
1615 if util.safehasattr(ml, b'getstorage'):
1598 store = ml.getstorage(b'')
1616 store = ml.getstorage(b'')
1599 else:
1617 else:
1600 store = ml._revlog
1618 store = ml._revlog
1601
1619
1602 return store.revision(mnode)
1620 return store.revision(mnode)
1603
1621
1604 @command(b'perfbdiff', revlogopts + formatteropts + [
1622 @command(b'perfbdiff', revlogopts + formatteropts + [
1605 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1623 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1606 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1624 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1607 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1625 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1608 (b'', b'blocks', False, b'test computing diffs into blocks'),
1626 (b'', b'blocks', False, b'test computing diffs into blocks'),
1609 (b'', b'xdiff', False, b'use xdiff algorithm'),
1627 (b'', b'xdiff', False, b'use xdiff algorithm'),
1610 ],
1628 ],
1611
1629
1612 b'-c|-m|FILE REV')
1630 b'-c|-m|FILE REV')
1613 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1631 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1614 """benchmark a bdiff between revisions
1632 """benchmark a bdiff between revisions
1615
1633
1616 By default, benchmark a bdiff between a revision and its delta parent.
1634 By default, benchmark a bdiff between a revision and its delta parent.
1617
1635
1618 With ``--count``, benchmark bdiffs between delta parents and self for N
1636 With ``--count``, benchmark bdiffs between delta parents and self for N
1619 revisions starting at the specified revision.
1637 revisions starting at the specified revision.
1620
1638
1621 With ``--alldata``, assume the requested revision is a changeset and
1639 With ``--alldata``, assume the requested revision is a changeset and
1622 measure bdiffs for all changes related to that changeset (manifest
1640 measure bdiffs for all changes related to that changeset (manifest
1623 and filelogs).
1641 and filelogs).
1624 """
1642 """
1625 opts = _byteskwargs(opts)
1643 opts = _byteskwargs(opts)
1626
1644
1627 if opts[b'xdiff'] and not opts[b'blocks']:
1645 if opts[b'xdiff'] and not opts[b'blocks']:
1628 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1646 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1629
1647
1630 if opts[b'alldata']:
1648 if opts[b'alldata']:
1631 opts[b'changelog'] = True
1649 opts[b'changelog'] = True
1632
1650
1633 if opts.get(b'changelog') or opts.get(b'manifest'):
1651 if opts.get(b'changelog') or opts.get(b'manifest'):
1634 file_, rev = None, file_
1652 file_, rev = None, file_
1635 elif rev is None:
1653 elif rev is None:
1636 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1654 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1637
1655
1638 blocks = opts[b'blocks']
1656 blocks = opts[b'blocks']
1639 xdiff = opts[b'xdiff']
1657 xdiff = opts[b'xdiff']
1640 textpairs = []
1658 textpairs = []
1641
1659
1642 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1660 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1643
1661
1644 startrev = r.rev(r.lookup(rev))
1662 startrev = r.rev(r.lookup(rev))
1645 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1663 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1646 if opts[b'alldata']:
1664 if opts[b'alldata']:
1647 # Load revisions associated with changeset.
1665 # Load revisions associated with changeset.
1648 ctx = repo[rev]
1666 ctx = repo[rev]
1649 mtext = _manifestrevision(repo, ctx.manifestnode())
1667 mtext = _manifestrevision(repo, ctx.manifestnode())
1650 for pctx in ctx.parents():
1668 for pctx in ctx.parents():
1651 pman = _manifestrevision(repo, pctx.manifestnode())
1669 pman = _manifestrevision(repo, pctx.manifestnode())
1652 textpairs.append((pman, mtext))
1670 textpairs.append((pman, mtext))
1653
1671
1654 # Load filelog revisions by iterating manifest delta.
1672 # Load filelog revisions by iterating manifest delta.
1655 man = ctx.manifest()
1673 man = ctx.manifest()
1656 pman = ctx.p1().manifest()
1674 pman = ctx.p1().manifest()
1657 for filename, change in pman.diff(man).items():
1675 for filename, change in pman.diff(man).items():
1658 fctx = repo.file(filename)
1676 fctx = repo.file(filename)
1659 f1 = fctx.revision(change[0][0] or -1)
1677 f1 = fctx.revision(change[0][0] or -1)
1660 f2 = fctx.revision(change[1][0] or -1)
1678 f2 = fctx.revision(change[1][0] or -1)
1661 textpairs.append((f1, f2))
1679 textpairs.append((f1, f2))
1662 else:
1680 else:
1663 dp = r.deltaparent(rev)
1681 dp = r.deltaparent(rev)
1664 textpairs.append((r.revision(dp), r.revision(rev)))
1682 textpairs.append((r.revision(dp), r.revision(rev)))
1665
1683
1666 withthreads = threads > 0
1684 withthreads = threads > 0
1667 if not withthreads:
1685 if not withthreads:
1668 def d():
1686 def d():
1669 for pair in textpairs:
1687 for pair in textpairs:
1670 if xdiff:
1688 if xdiff:
1671 mdiff.bdiff.xdiffblocks(*pair)
1689 mdiff.bdiff.xdiffblocks(*pair)
1672 elif blocks:
1690 elif blocks:
1673 mdiff.bdiff.blocks(*pair)
1691 mdiff.bdiff.blocks(*pair)
1674 else:
1692 else:
1675 mdiff.textdiff(*pair)
1693 mdiff.textdiff(*pair)
1676 else:
1694 else:
1677 q = queue()
1695 q = queue()
1678 for i in _xrange(threads):
1696 for i in _xrange(threads):
1679 q.put(None)
1697 q.put(None)
1680 ready = threading.Condition()
1698 ready = threading.Condition()
1681 done = threading.Event()
1699 done = threading.Event()
1682 for i in _xrange(threads):
1700 for i in _xrange(threads):
1683 threading.Thread(target=_bdiffworker,
1701 threading.Thread(target=_bdiffworker,
1684 args=(q, blocks, xdiff, ready, done)).start()
1702 args=(q, blocks, xdiff, ready, done)).start()
1685 q.join()
1703 q.join()
1686 def d():
1704 def d():
1687 for pair in textpairs:
1705 for pair in textpairs:
1688 q.put(pair)
1706 q.put(pair)
1689 for i in _xrange(threads):
1707 for i in _xrange(threads):
1690 q.put(None)
1708 q.put(None)
1691 with ready:
1709 with ready:
1692 ready.notify_all()
1710 ready.notify_all()
1693 q.join()
1711 q.join()
1694 timer, fm = gettimer(ui, opts)
1712 timer, fm = gettimer(ui, opts)
1695 timer(d)
1713 timer(d)
1696 fm.end()
1714 fm.end()
1697
1715
1698 if withthreads:
1716 if withthreads:
1699 done.set()
1717 done.set()
1700 for i in _xrange(threads):
1718 for i in _xrange(threads):
1701 q.put(None)
1719 q.put(None)
1702 with ready:
1720 with ready:
1703 ready.notify_all()
1721 ready.notify_all()
1704
1722
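# The diffing primitives exercised above, sketched on two small byte
# strings (no repository needed for this part):
#
#     old = b'line one\nline two\n'
#     new = b'line one\nline 2\n'
#     mdiff.textdiff(old, new)       # binary delta between the two texts
#     mdiff.bdiff.blocks(old, new)   # list of matching-block tuples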
1705 @command(b'perfunidiff', revlogopts + formatteropts + [
1723 @command(b'perfunidiff', revlogopts + formatteropts + [
1706 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1724 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1707 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1725 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1708 ], b'-c|-m|FILE REV')
1726 ], b'-c|-m|FILE REV')
1709 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1727 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1710 """benchmark a unified diff between revisions
1728 """benchmark a unified diff between revisions
1711
1729
1712 This doesn't include any copy tracing - it's just a unified diff
1730 This doesn't include any copy tracing - it's just a unified diff
1713 of the texts.
1731 of the texts.
1714
1732
1715 By default, benchmark a diff between a revision and its delta parent.
1733 By default, benchmark a diff between a revision and its delta parent.
1716
1734
1717 With ``--count``, benchmark diffs between delta parents and self for N
1735 With ``--count``, benchmark diffs between delta parents and self for N
1718 revisions starting at the specified revision.
1736 revisions starting at the specified revision.
1719
1737
1720 With ``--alldata``, assume the requested revision is a changeset and
1738 With ``--alldata``, assume the requested revision is a changeset and
1721 measure diffs for all changes related to that changeset (manifest
1739 measure diffs for all changes related to that changeset (manifest
1722 and filelogs).
1740 and filelogs).
1723 """
1741 """
1724 opts = _byteskwargs(opts)
1742 opts = _byteskwargs(opts)
1725 if opts[b'alldata']:
1743 if opts[b'alldata']:
1726 opts[b'changelog'] = True
1744 opts[b'changelog'] = True
1727
1745
1728 if opts.get(b'changelog') or opts.get(b'manifest'):
1746 if opts.get(b'changelog') or opts.get(b'manifest'):
1729 file_, rev = None, file_
1747 file_, rev = None, file_
1730 elif rev is None:
1748 elif rev is None:
1731 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1749 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1732
1750
1733 textpairs = []
1751 textpairs = []
1734
1752
1735 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1753 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1736
1754
1737 startrev = r.rev(r.lookup(rev))
1755 startrev = r.rev(r.lookup(rev))
1738 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1756 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1739 if opts[b'alldata']:
1757 if opts[b'alldata']:
1740 # Load revisions associated with changeset.
1758 # Load revisions associated with changeset.
1741 ctx = repo[rev]
1759 ctx = repo[rev]
1742 mtext = _manifestrevision(repo, ctx.manifestnode())
1760 mtext = _manifestrevision(repo, ctx.manifestnode())
1743 for pctx in ctx.parents():
1761 for pctx in ctx.parents():
1744 pman = _manifestrevision(repo, pctx.manifestnode())
1762 pman = _manifestrevision(repo, pctx.manifestnode())
1745 textpairs.append((pman, mtext))
1763 textpairs.append((pman, mtext))
1746
1764
1747 # Load filelog revisions by iterating manifest delta.
1765 # Load filelog revisions by iterating manifest delta.
1748 man = ctx.manifest()
1766 man = ctx.manifest()
1749 pman = ctx.p1().manifest()
1767 pman = ctx.p1().manifest()
1750 for filename, change in pman.diff(man).items():
1768 for filename, change in pman.diff(man).items():
1751 fctx = repo.file(filename)
1769 fctx = repo.file(filename)
1752 f1 = fctx.revision(change[0][0] or -1)
1770 f1 = fctx.revision(change[0][0] or -1)
1753 f2 = fctx.revision(change[1][0] or -1)
1771 f2 = fctx.revision(change[1][0] or -1)
1754 textpairs.append((f1, f2))
1772 textpairs.append((f1, f2))
1755 else:
1773 else:
1756 dp = r.deltaparent(rev)
1774 dp = r.deltaparent(rev)
1757 textpairs.append((r.revision(dp), r.revision(rev)))
1775 textpairs.append((r.revision(dp), r.revision(rev)))
1758
1776
1759 def d():
1777 def d():
1760 for left, right in textpairs:
1778 for left, right in textpairs:
1761 # The date strings don't matter, so we pass empty strings.
1779 # The date strings don't matter, so we pass empty strings.
1762 headerlines, hunks = mdiff.unidiff(
1780 headerlines, hunks = mdiff.unidiff(
1763 left, b'', right, b'', b'left', b'right', binary=False)
1781 left, b'', right, b'', b'left', b'right', binary=False)
1764 # consume iterators in roughly the way patch.py does
1782 # consume iterators in roughly the way patch.py does
1765 b'\n'.join(headerlines)
1783 b'\n'.join(headerlines)
1766 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1784 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1767 timer, fm = gettimer(ui, opts)
1785 timer, fm = gettimer(ui, opts)
1768 timer(d)
1786 timer(d)
1769 fm.end()
1787 fm.end()
1770
1788
1771 @command(b'perfdiffwd', formatteropts)
1789 @command(b'perfdiffwd', formatteropts)
1772 def perfdiffwd(ui, repo, **opts):
1790 def perfdiffwd(ui, repo, **opts):
1773 """Profile diff of working directory changes"""
1791 """Profile diff of working directory changes"""
1774 opts = _byteskwargs(opts)
1792 opts = _byteskwargs(opts)
1775 timer, fm = gettimer(ui, opts)
1793 timer, fm = gettimer(ui, opts)
1776 options = {
1794 options = {
1777 'w': 'ignore_all_space',
1795 'w': 'ignore_all_space',
1778 'b': 'ignore_space_change',
1796 'b': 'ignore_space_change',
1779 'B': 'ignore_blank_lines',
1797 'B': 'ignore_blank_lines',
1780 }
1798 }
1781
1799
1782 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1800 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1783 opts = dict((options[c], b'1') for c in diffopt)
1801 opts = dict((options[c], b'1') for c in diffopt)
1784 def d():
1802 def d():
1785 ui.pushbuffer()
1803 ui.pushbuffer()
1786 commands.diff(ui, repo, **opts)
1804 commands.diff(ui, repo, **opts)
1787 ui.popbuffer()
1805 ui.popbuffer()
1788 diffopt = diffopt.encode('ascii')
1806 diffopt = diffopt.encode('ascii')
1789 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1807 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1790 timer(d, title=title)
1808 timer(d, title=title)
1791 fm.end()
1809 fm.end()
1792
1810
1793 @command(b'perfrevlogindex', revlogopts + formatteropts,
1811 @command(b'perfrevlogindex', revlogopts + formatteropts,
1794 b'-c|-m|FILE')
1812 b'-c|-m|FILE')
1795 def perfrevlogindex(ui, repo, file_=None, **opts):
1813 def perfrevlogindex(ui, repo, file_=None, **opts):
1796 """Benchmark operations against a revlog index.
1814 """Benchmark operations against a revlog index.
1797
1815
1798 This tests constructing a revlog instance, reading index data,
1816 This tests constructing a revlog instance, reading index data,
1799 parsing index data, and performing various operations related to
1817 parsing index data, and performing various operations related to
1800 index data.
1818 index data.
1801 """
1819 """
1802
1820
1803 opts = _byteskwargs(opts)
1821 opts = _byteskwargs(opts)
1804
1822
1805 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1823 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1806
1824
1807 opener = getattr(rl, 'opener') # trick linter
1825 opener = getattr(rl, 'opener') # trick linter
1808 indexfile = rl.indexfile
1826 indexfile = rl.indexfile
1809 data = opener.read(indexfile)
1827 data = opener.read(indexfile)
1810
1828
1811 header = struct.unpack(b'>I', data[0:4])[0]
1829 header = struct.unpack(b'>I', data[0:4])[0]
1812 version = header & 0xFFFF
1830 version = header & 0xFFFF
1813 if version == 1:
1831 if version == 1:
1814 revlogio = revlog.revlogio()
1832 revlogio = revlog.revlogio()
1815 inline = header & (1 << 16)
1833 inline = header & (1 << 16)
1816 else:
1834 else:
1817 raise error.Abort((b'unsupported revlog version: %d') % version)
1835 raise error.Abort((b'unsupported revlog version: %d') % version)
1818
1836
1819 rllen = len(rl)
1837 rllen = len(rl)
1820
1838
1821 node0 = rl.node(0)
1839 node0 = rl.node(0)
1822 node25 = rl.node(rllen // 4)
1840 node25 = rl.node(rllen // 4)
1823 node50 = rl.node(rllen // 2)
1841 node50 = rl.node(rllen // 2)
1824 node75 = rl.node(rllen // 4 * 3)
1842 node75 = rl.node(rllen // 4 * 3)
1825 node100 = rl.node(rllen - 1)
1843 node100 = rl.node(rllen - 1)
1826
1844
1827 allrevs = range(rllen)
1845 allrevs = range(rllen)
1828 allrevsrev = list(reversed(allrevs))
1846 allrevsrev = list(reversed(allrevs))
1829 allnodes = [rl.node(rev) for rev in range(rllen)]
1847 allnodes = [rl.node(rev) for rev in range(rllen)]
1830 allnodesrev = list(reversed(allnodes))
1848 allnodesrev = list(reversed(allnodes))
1831
1849
1832 def constructor():
1850 def constructor():
1833 revlog.revlog(opener, indexfile)
1851 revlog.revlog(opener, indexfile)
1834
1852
1835 def read():
1853 def read():
1836 with opener(indexfile) as fh:
1854 with opener(indexfile) as fh:
1837 fh.read()
1855 fh.read()
1838
1856
1839 def parseindex():
1857 def parseindex():
1840 revlogio.parseindex(data, inline)
1858 revlogio.parseindex(data, inline)
1841
1859
1842 def getentry(revornode):
1860 def getentry(revornode):
1843 index = revlogio.parseindex(data, inline)[0]
1861 index = revlogio.parseindex(data, inline)[0]
1844 index[revornode]
1862 index[revornode]
1845
1863
1846 def getentries(revs, count=1):
1864 def getentries(revs, count=1):
1847 index = revlogio.parseindex(data, inline)[0]
1865 index = revlogio.parseindex(data, inline)[0]
1848
1866
1849 for i in range(count):
1867 for i in range(count):
1850 for rev in revs:
1868 for rev in revs:
1851 index[rev]
1869 index[rev]
1852
1870
1853 def resolvenode(node):
1871 def resolvenode(node):
1854 nodemap = revlogio.parseindex(data, inline)[1]
1872 nodemap = revlogio.parseindex(data, inline)[1]
1855 # This only works for the C code.
1873 # This only works for the C code.
1856 if nodemap is None:
1874 if nodemap is None:
1857 return
1875 return
1858
1876
1859 try:
1877 try:
1860 nodemap[node]
1878 nodemap[node]
1861 except error.RevlogError:
1879 except error.RevlogError:
1862 pass
1880 pass
1863
1881
1864 def resolvenodes(nodes, count=1):
1882 def resolvenodes(nodes, count=1):
1865 nodemap = revlogio.parseindex(data, inline)[1]
1883 nodemap = revlogio.parseindex(data, inline)[1]
1866 if nodemap is None:
1884 if nodemap is None:
1867 return
1885 return
1868
1886
1869 for i in range(count):
1887 for i in range(count):
1870 for node in nodes:
1888 for node in nodes:
1871 try:
1889 try:
1872 nodemap[node]
1890 nodemap[node]
1873 except error.RevlogError:
1891 except error.RevlogError:
1874 pass
1892 pass
1875
1893
1876 benches = [
1894 benches = [
1877 (constructor, b'revlog constructor'),
1895 (constructor, b'revlog constructor'),
1878 (read, b'read'),
1896 (read, b'read'),
1879 (parseindex, b'create index object'),
1897 (parseindex, b'create index object'),
1880 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1898 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1881 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1899 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1882 (lambda: resolvenode(node0), b'look up node at rev 0'),
1900 (lambda: resolvenode(node0), b'look up node at rev 0'),
1883 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1901 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1884 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1902 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1885 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1903 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1886 (lambda: resolvenode(node100), b'look up node at tip'),
1904 (lambda: resolvenode(node100), b'look up node at tip'),
1887 # 2x variation is to measure caching impact.
1905 # 2x variation is to measure caching impact.
1888 (lambda: resolvenodes(allnodes),
1906 (lambda: resolvenodes(allnodes),
1889 b'look up all nodes (forward)'),
1907 b'look up all nodes (forward)'),
1890 (lambda: resolvenodes(allnodes, 2),
1908 (lambda: resolvenodes(allnodes, 2),
1891 b'look up all nodes 2x (forward)'),
1909 b'look up all nodes 2x (forward)'),
1892 (lambda: resolvenodes(allnodesrev),
1910 (lambda: resolvenodes(allnodesrev),
1893 b'look up all nodes (reverse)'),
1911 b'look up all nodes (reverse)'),
1894 (lambda: resolvenodes(allnodesrev, 2),
1912 (lambda: resolvenodes(allnodesrev, 2),
1895 b'look up all nodes 2x (reverse)'),
1913 b'look up all nodes 2x (reverse)'),
1896 (lambda: getentries(allrevs),
1914 (lambda: getentries(allrevs),
1897 b'retrieve all index entries (forward)'),
1915 b'retrieve all index entries (forward)'),
1898 (lambda: getentries(allrevs, 2),
1916 (lambda: getentries(allrevs, 2),
1899 b'retrieve all index entries 2x (forward)'),
1917 b'retrieve all index entries 2x (forward)'),
1900 (lambda: getentries(allrevsrev),
1918 (lambda: getentries(allrevsrev),
1901 b'retrieve all index entries (reverse)'),
1919 b'retrieve all index entries (reverse)'),
1902 (lambda: getentries(allrevsrev, 2),
1920 (lambda: getentries(allrevsrev, 2),
1903 b'retrieve all index entries 2x (reverse)'),
1921 b'retrieve all index entries 2x (reverse)'),
1904 ]
1922 ]
1905
1923
1906 for fn, title in benches:
1924 for fn, title in benches:
1907 timer, fm = gettimer(ui, opts)
1925 timer, fm = gettimer(ui, opts)
1908 timer(fn, title=title)
1926 timer(fn, title=title)
1909 fm.end()
1927 fm.end()
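# Hypothetical usage of the command above: `hg perfrevlogindex -c` exercises
# the changelog index, `-m` the manifest, and a plain FILE argument that
# file's filelog. The node25/node50/node75 samples are simply the nodes found
# at 1/4, 1/2 and 3/4 of the index length.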
1910
1928
1911 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1929 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1912 [(b'd', b'dist', 100, b'distance between the revisions'),
1930 [(b'd', b'dist', 100, b'distance between the revisions'),
1913 (b's', b'startrev', 0, b'revision to start reading at'),
1931 (b's', b'startrev', 0, b'revision to start reading at'),
1914 (b'', b'reverse', False, b'read in reverse')],
1932 (b'', b'reverse', False, b'read in reverse')],
1915 b'-c|-m|FILE')
1933 b'-c|-m|FILE')
1916 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1934 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1917 **opts):
1935 **opts):
1918 """Benchmark reading a series of revisions from a revlog.
1936 """Benchmark reading a series of revisions from a revlog.
1919
1937
1920 By default, we read every ``-d/--dist`` revision from 0 to tip of
1938 By default, we read every ``-d/--dist`` revision from 0 to tip of
1921 the specified revlog.
1939 the specified revlog.
1922
1940
1923 The start revision can be defined via ``-s/--startrev``.
1941 The start revision can be defined via ``-s/--startrev``.
1924 """
1942 """
1925 opts = _byteskwargs(opts)
1943 opts = _byteskwargs(opts)
1926
1944
1927 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1945 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1928 rllen = getlen(ui)(rl)
1946 rllen = getlen(ui)(rl)
1929
1947
1930 if startrev < 0:
1948 if startrev < 0:
1931 startrev = rllen + startrev
1949 startrev = rllen + startrev
1932
1950
1933 def d():
1951 def d():
1934 rl.clearcaches()
1952 rl.clearcaches()
1935
1953
1936 beginrev = startrev
1954 beginrev = startrev
1937 endrev = rllen
1955 endrev = rllen
1938 dist = opts[b'dist']
1956 dist = opts[b'dist']
1939
1957
1940 if reverse:
1958 if reverse:
1941 beginrev, endrev = endrev - 1, beginrev - 1
1959 beginrev, endrev = endrev - 1, beginrev - 1
1942 dist = -1 * dist
1960 dist = -1 * dist
1943
1961
1944 for x in _xrange(beginrev, endrev, dist):
1962 for x in _xrange(beginrev, endrev, dist):
1945 # Old Mercurial versions don't support passing an int here.
1963 # Old Mercurial versions don't support passing an int here.
1946 n = rl.node(x)
1964 n = rl.node(x)
1947 rl.revision(n)
1965 rl.revision(n)
1948
1966
1949 timer, fm = gettimer(ui, opts)
1967 timer, fm = gettimer(ui, opts)
1950 timer(d)
1968 timer(d)
1951 fm.end()
1969 fm.end()
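# Example (flag values are only illustrative): `hg perfrevlogrevisions -c
# --dist 10` reads every 10th changelog revision from rev 0 to tip. Adding
# `--reverse` flips the walk to start at tip and step backwards by the same
# distance, and a negative `--startrev` is interpreted relative to the end of
# the revlog (startrev = rllen + startrev).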
1952
1970
1953 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1971 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1954 [(b's', b'startrev', 1000, b'revision to start writing at'),
1972 [(b's', b'startrev', 1000, b'revision to start writing at'),
1955 (b'', b'stoprev', -1, b'last revision to write'),
1973 (b'', b'stoprev', -1, b'last revision to write'),
1956 (b'', b'count', 3, b'number of runs to perform'),
1974 (b'', b'count', 3, b'number of runs to perform'),
1957 (b'', b'details', False, b'print timing for every revision tested'),
1975 (b'', b'details', False, b'print timing for every revision tested'),
1958 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1976 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1959 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1977 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1960 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1978 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1961 ],
1979 ],
1962 b'-c|-m|FILE')
1980 b'-c|-m|FILE')
1963 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1981 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1964 """Benchmark writing a series of revisions to a revlog.
1982 """Benchmark writing a series of revisions to a revlog.
1965
1983
1966 Possible source values are:
1984 Possible source values are:
1967 * `full`: add from a full text (default).
1985 * `full`: add from a full text (default).
1968 * `parent-1`: add from a delta to the first parent
1986 * `parent-1`: add from a delta to the first parent
1969 * `parent-2`: add from a delta to the second parent if it exists
1987 * `parent-2`: add from a delta to the second parent if it exists
1970 (use a delta from the first parent otherwise)
1988 (use a delta from the first parent otherwise)
1971 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1989 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1972 * `storage`: add from the existing precomputed deltas
1990 * `storage`: add from the existing precomputed deltas
1973 """
1991 """
1974 opts = _byteskwargs(opts)
1992 opts = _byteskwargs(opts)
1975
1993
1976 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1994 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1977 rllen = getlen(ui)(rl)
1995 rllen = getlen(ui)(rl)
1978 if startrev < 0:
1996 if startrev < 0:
1979 startrev = rllen + startrev
1997 startrev = rllen + startrev
1980 if stoprev < 0:
1998 if stoprev < 0:
1981 stoprev = rllen + stoprev
1999 stoprev = rllen + stoprev
1982
2000
1983 lazydeltabase = opts['lazydeltabase']
2001 lazydeltabase = opts['lazydeltabase']
1984 source = opts['source']
2002 source = opts['source']
1985 clearcaches = opts['clear_caches']
2003 clearcaches = opts['clear_caches']
1986 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
2004 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1987 b'storage')
2005 b'storage')
1988 if source not in validsource:
2006 if source not in validsource:
1989 raise error.Abort('invalid source type: %s' % source)
2007 raise error.Abort('invalid source type: %s' % source)
1990
2008
1991 ### actually gather results
2009 ### actually gather results
1992 count = opts['count']
2010 count = opts['count']
1993 if count <= 0:
2011 if count <= 0:
1994 raise error.Abort('invalid run count: %d' % count)
2012 raise error.Abort('invalid run count: %d' % count)
1995 allresults = []
2013 allresults = []
1996 for c in range(count):
2014 for c in range(count):
1997 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
2015 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1998 lazydeltabase=lazydeltabase,
2016 lazydeltabase=lazydeltabase,
1999 clearcaches=clearcaches)
2017 clearcaches=clearcaches)
2000 allresults.append(timing)
2018 allresults.append(timing)
2001
2019
2002 ### consolidate the results in a single list
2020 ### consolidate the results in a single list
2003 results = []
2021 results = []
2004 for idx, (rev, t) in enumerate(allresults[0]):
2022 for idx, (rev, t) in enumerate(allresults[0]):
2005 ts = [t]
2023 ts = [t]
2006 for other in allresults[1:]:
2024 for other in allresults[1:]:
2007 orev, ot = other[idx]
2025 orev, ot = other[idx]
2008 assert orev == rev
2026 assert orev == rev
2009 ts.append(ot)
2027 ts.append(ot)
2010 results.append((rev, ts))
2028 results.append((rev, ts))
2011 resultcount = len(results)
2029 resultcount = len(results)
2012
2030
2013 ### Compute and display relevant statistics
2031 ### Compute and display relevant statistics
2014
2032
2015 # get a formatter
2033 # get a formatter
2016 fm = ui.formatter(b'perf', opts)
2034 fm = ui.formatter(b'perf', opts)
2017 displayall = ui.configbool(b"perf", b"all-timing", False)
2035 displayall = ui.configbool(b"perf", b"all-timing", False)
2018
2036
2019 # print individual details if requested
2037 # print individual details if requested
2020 if opts['details']:
2038 if opts['details']:
2021 for idx, item in enumerate(results, 1):
2039 for idx, item in enumerate(results, 1):
2022 rev, data = item
2040 rev, data = item
2023 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2041 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2024 formatone(fm, data, title=title, displayall=displayall)
2042 formatone(fm, data, title=title, displayall=displayall)
2025
2043
2026 # sorts results by median time
2044 # sorts results by median time
2027 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2045 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2028 # list of (name, index) to display
2046 # list of (name, index) to display
2029 relevants = [
2047 relevants = [
2030 ("min", 0),
2048 ("min", 0),
2031 ("10%", resultcount * 10 // 100),
2049 ("10%", resultcount * 10 // 100),
2032 ("25%", resultcount * 25 // 100),
2050 ("25%", resultcount * 25 // 100),
2033 ("50%", resultcount * 70 // 100),
2051 ("50%", resultcount * 70 // 100),
2034 ("75%", resultcount * 75 // 100),
2052 ("75%", resultcount * 75 // 100),
2035 ("90%", resultcount * 90 // 100),
2053 ("90%", resultcount * 90 // 100),
2036 ("95%", resultcount * 95 // 100),
2054 ("95%", resultcount * 95 // 100),
2037 ("99%", resultcount * 99 // 100),
2055 ("99%", resultcount * 99 // 100),
2038 ("99.9%", resultcount * 999 // 1000),
2056 ("99.9%", resultcount * 999 // 1000),
2039 ("99.99%", resultcount * 9999 // 10000),
2057 ("99.99%", resultcount * 9999 // 10000),
2040 ("99.999%", resultcount * 99999 // 100000),
2058 ("99.999%", resultcount * 99999 // 100000),
2041 ("max", -1),
2059 ("max", -1),
2042 ]
2060 ]
2043 if not ui.quiet:
2061 if not ui.quiet:
2044 for name, idx in relevants:
2062 for name, idx in relevants:
2045 data = results[idx]
2063 data = results[idx]
2046 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2064 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2047 formatone(fm, data[1], title=title, displayall=displayall)
2065 formatone(fm, data[1], title=title, displayall=displayall)
2048
2066
2049 # XXX summing that many floats will not be very precise; we ignore this fact
2067 # XXX summing that many floats will not be very precise; we ignore this fact
2050 # for now
2068 # for now
2051 totaltime = []
2069 totaltime = []
2052 for item in allresults:
2070 for item in allresults:
2053 totaltime.append((sum(x[1][0] for x in item),
2071 totaltime.append((sum(x[1][0] for x in item),
2054 sum(x[1][1] for x in item),
2072 sum(x[1][1] for x in item),
2055 sum(x[1][2] for x in item),)
2073 sum(x[1][2] for x in item),)
2056 )
2074 )
2057 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2075 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2058 displayall=displayall)
2076 displayall=displayall)
2059 fm.end()
2077 fm.end()
2060
2078
2061 class _faketr(object):
2079 class _faketr(object):
2062 def add(s, x, y, z=None):
2080 def add(s, x, y, z=None):
2063 return None
2081 return None
2064
2082
2065 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2083 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2066 lazydeltabase=True, clearcaches=True):
2084 lazydeltabase=True, clearcaches=True):
2067 timings = []
2085 timings = []
2068 tr = _faketr()
2086 tr = _faketr()
2069 with _temprevlog(ui, orig, startrev) as dest:
2087 with _temprevlog(ui, orig, startrev) as dest:
2070 dest._lazydeltabase = lazydeltabase
2088 dest._lazydeltabase = lazydeltabase
2071 revs = list(orig.revs(startrev, stoprev))
2089 revs = list(orig.revs(startrev, stoprev))
2072 total = len(revs)
2090 total = len(revs)
2073 topic = 'adding'
2091 topic = 'adding'
2074 if runidx is not None:
2092 if runidx is not None:
2075 topic += ' (run #%d)' % runidx
2093 topic += ' (run #%d)' % runidx
2076 # Support both old and new progress API
2094 # Support both old and new progress API
2077 if util.safehasattr(ui, 'makeprogress'):
2095 if util.safehasattr(ui, 'makeprogress'):
2078 progress = ui.makeprogress(topic, unit='revs', total=total)
2096 progress = ui.makeprogress(topic, unit='revs', total=total)
2079 def updateprogress(pos):
2097 def updateprogress(pos):
2080 progress.update(pos)
2098 progress.update(pos)
2081 def completeprogress():
2099 def completeprogress():
2082 progress.complete()
2100 progress.complete()
2083 else:
2101 else:
2084 def updateprogress(pos):
2102 def updateprogress(pos):
2085 ui.progress(topic, pos, unit='revs', total=total)
2103 ui.progress(topic, pos, unit='revs', total=total)
2086 def completeprogress():
2104 def completeprogress():
2087 ui.progress(topic, None, unit='revs', total=total)
2105 ui.progress(topic, None, unit='revs', total=total)
2088
2106
2089 for idx, rev in enumerate(revs):
2107 for idx, rev in enumerate(revs):
2090 updateprogress(idx)
2108 updateprogress(idx)
2091 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2109 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2092 if clearcaches:
2110 if clearcaches:
2093 dest.index.clearcaches()
2111 dest.index.clearcaches()
2094 dest.clearcaches()
2112 dest.clearcaches()
2095 with timeone() as r:
2113 with timeone() as r:
2096 dest.addrawrevision(*addargs, **addkwargs)
2114 dest.addrawrevision(*addargs, **addkwargs)
2097 timings.append((rev, r[0]))
2115 timings.append((rev, r[0]))
2098 updateprogress(total)
2116 updateprogress(total)
2099 completeprogress()
2117 completeprogress()
2100 return timings
2118 return timings
2101
2119
2102 def _getrevisionseed(orig, rev, tr, source):
2120 def _getrevisionseed(orig, rev, tr, source):
2103 from mercurial.node import nullid
2121 from mercurial.node import nullid
2104
2122
2105 linkrev = orig.linkrev(rev)
2123 linkrev = orig.linkrev(rev)
2106 node = orig.node(rev)
2124 node = orig.node(rev)
2107 p1, p2 = orig.parents(node)
2125 p1, p2 = orig.parents(node)
2108 flags = orig.flags(rev)
2126 flags = orig.flags(rev)
2109 cachedelta = None
2127 cachedelta = None
2110 text = None
2128 text = None
2111
2129
2112 if source == b'full':
2130 if source == b'full':
2113 text = orig.revision(rev)
2131 text = orig.revision(rev)
2114 elif source == b'parent-1':
2132 elif source == b'parent-1':
2115 baserev = orig.rev(p1)
2133 baserev = orig.rev(p1)
2116 cachedelta = (baserev, orig.revdiff(p1, rev))
2134 cachedelta = (baserev, orig.revdiff(p1, rev))
2117 elif source == b'parent-2':
2135 elif source == b'parent-2':
2118 parent = p2
2136 parent = p2
2119 if p2 == nullid:
2137 if p2 == nullid:
2120 parent = p1
2138 parent = p1
2121 baserev = orig.rev(parent)
2139 baserev = orig.rev(parent)
2122 cachedelta = (baserev, orig.revdiff(parent, rev))
2140 cachedelta = (baserev, orig.revdiff(parent, rev))
2123 elif source == b'parent-smallest':
2141 elif source == b'parent-smallest':
2124 p1diff = orig.revdiff(p1, rev)
2142 p1diff = orig.revdiff(p1, rev)
2125 parent = p1
2143 parent = p1
2126 diff = p1diff
2144 diff = p1diff
2127 if p2 != nullid:
2145 if p2 != nullid:
2128 p2diff = orig.revdiff(p2, rev)
2146 p2diff = orig.revdiff(p2, rev)
2129 if len(p1diff) > len(p2diff):
2147 if len(p1diff) > len(p2diff):
2130 parent = p2
2148 parent = p2
2131 diff = p2diff
2149 diff = p2diff
2132 baserev = orig.rev(parent)
2150 baserev = orig.rev(parent)
2133 cachedelta = (baserev, diff)
2151 cachedelta = (baserev, diff)
2134 elif source == b'storage':
2152 elif source == b'storage':
2135 baserev = orig.deltaparent(rev)
2153 baserev = orig.deltaparent(rev)
2136 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2154 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2137
2155
2138 return ((text, tr, linkrev, p1, p2),
2156 return ((text, tr, linkrev, p1, p2),
2139 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2157 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
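# Sketch of the value returned above for source == b'parent-1' (all names are
# the locals used in this helper):
#     addargs   == (None, tr, linkrev, p1, p2)   # no fulltext is passed
#     addkwargs == {'node': node, 'flags': flags,
#                   'cachedelta': (orig.rev(p1), orig.revdiff(p1, rev))}
# which is what _timeonewrite hands to dest.addrawrevision().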
2140
2158
2141 @contextlib.contextmanager
2159 @contextlib.contextmanager
2142 def _temprevlog(ui, orig, truncaterev):
2160 def _temprevlog(ui, orig, truncaterev):
2143 from mercurial import vfs as vfsmod
2161 from mercurial import vfs as vfsmod
2144
2162
2145 if orig._inline:
2163 if orig._inline:
2146 raise error.Abort('not supporting inline revlog (yet)')
2164 raise error.Abort('not supporting inline revlog (yet)')
2147
2165
2148 origindexpath = orig.opener.join(orig.indexfile)
2166 origindexpath = orig.opener.join(orig.indexfile)
2149 origdatapath = orig.opener.join(orig.datafile)
2167 origdatapath = orig.opener.join(orig.datafile)
2150 indexname = 'revlog.i'
2168 indexname = 'revlog.i'
2151 dataname = 'revlog.d'
2169 dataname = 'revlog.d'
2152
2170
2153 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2171 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2154 try:
2172 try:
2155 # copy the data file in a temporary directory
2173 # copy the data file in a temporary directory
2156 ui.debug('copying data in %s\n' % tmpdir)
2174 ui.debug('copying data in %s\n' % tmpdir)
2157 destindexpath = os.path.join(tmpdir, 'revlog.i')
2175 destindexpath = os.path.join(tmpdir, 'revlog.i')
2158 destdatapath = os.path.join(tmpdir, 'revlog.d')
2176 destdatapath = os.path.join(tmpdir, 'revlog.d')
2159 shutil.copyfile(origindexpath, destindexpath)
2177 shutil.copyfile(origindexpath, destindexpath)
2160 shutil.copyfile(origdatapath, destdatapath)
2178 shutil.copyfile(origdatapath, destdatapath)
2161
2179
2162 # remove the data we want to add again
2180 # remove the data we want to add again
2163 ui.debug('truncating data to be rewritten\n')
2181 ui.debug('truncating data to be rewritten\n')
2164 with open(destindexpath, 'ab') as index:
2182 with open(destindexpath, 'ab') as index:
2165 index.seek(0)
2183 index.seek(0)
2166 index.truncate(truncaterev * orig._io.size)
2184 index.truncate(truncaterev * orig._io.size)
2167 with open(destdatapath, 'ab') as data:
2185 with open(destdatapath, 'ab') as data:
2168 data.seek(0)
2186 data.seek(0)
2169 data.truncate(orig.start(truncaterev))
2187 data.truncate(orig.start(truncaterev))
2170
2188
2171 # instantiate a new revlog from the temporary copy
2189 # instantiate a new revlog from the temporary copy
2172 ui.debug('instantiating revlog from the temporary copy\n')
2190 ui.debug('instantiating revlog from the temporary copy\n')
2173 vfs = vfsmod.vfs(tmpdir)
2191 vfs = vfsmod.vfs(tmpdir)
2174 vfs.options = getattr(orig.opener, 'options', None)
2192 vfs.options = getattr(orig.opener, 'options', None)
2175
2193
2176 dest = revlog.revlog(vfs,
2194 dest = revlog.revlog(vfs,
2177 indexfile=indexname,
2195 indexfile=indexname,
2178 datafile=dataname)
2196 datafile=dataname)
2179 if dest._inline:
2197 if dest._inline:
2180 raise error.Abort('not supporting inline revlog (yet)')
2198 raise error.Abort('not supporting inline revlog (yet)')
2181 # make sure internals are initialized
2199 # make sure internals are initialized
2182 dest.revision(len(dest) - 1)
2200 dest.revision(len(dest) - 1)
2183 yield dest
2201 yield dest
2184 del dest, vfs
2202 del dest, vfs
2185 finally:
2203 finally:
2186 shutil.rmtree(tmpdir, True)
2204 shutil.rmtree(tmpdir, True)
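# The truncation above keeps exactly `truncaterev` revisions: the index copy
# is cut at truncaterev * orig._io.size bytes (index entries have a fixed
# size, 64 bytes for the v1 format), and the data copy is cut at
# orig.start(truncaterev), the byte offset where revision `truncaterev`
# starts. With truncaterev = 1000, for instance, the first 64000 index bytes
# survive.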
2187
2205
2188 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2206 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2189 [(b'e', b'engines', b'', b'compression engines to use'),
2207 [(b'e', b'engines', b'', b'compression engines to use'),
2190 (b's', b'startrev', 0, b'revision to start at')],
2208 (b's', b'startrev', 0, b'revision to start at')],
2191 b'-c|-m|FILE')
2209 b'-c|-m|FILE')
2192 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2210 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2193 """Benchmark operations on revlog chunks.
2211 """Benchmark operations on revlog chunks.
2194
2212
2195 Logically, each revlog is a collection of fulltext revisions. However,
2213 Logically, each revlog is a collection of fulltext revisions. However,
2196 stored within each revlog are "chunks" of possibly compressed data. This
2214 stored within each revlog are "chunks" of possibly compressed data. This
2197 data needs to be read and decompressed or compressed and written.
2215 data needs to be read and decompressed or compressed and written.
2198
2216
2199 This command measures the time it takes to read+decompress and recompress
2217 This command measures the time it takes to read+decompress and recompress
2200 chunks in a revlog. It effectively isolates I/O and compression performance.
2218 chunks in a revlog. It effectively isolates I/O and compression performance.
2201 For measurements of higher-level operations like resolving revisions,
2219 For measurements of higher-level operations like resolving revisions,
2202 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2220 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2203 """
2221 """
2204 opts = _byteskwargs(opts)
2222 opts = _byteskwargs(opts)
2205
2223
2206 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2224 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2207
2225
2208 # _chunkraw was renamed to _getsegmentforrevs.
2226 # _chunkraw was renamed to _getsegmentforrevs.
2209 try:
2227 try:
2210 segmentforrevs = rl._getsegmentforrevs
2228 segmentforrevs = rl._getsegmentforrevs
2211 except AttributeError:
2229 except AttributeError:
2212 segmentforrevs = rl._chunkraw
2230 segmentforrevs = rl._chunkraw
2213
2231
2214 # Verify engines argument.
2232 # Verify engines argument.
2215 if engines:
2233 if engines:
2216 engines = set(e.strip() for e in engines.split(b','))
2234 engines = set(e.strip() for e in engines.split(b','))
2217 for engine in engines:
2235 for engine in engines:
2218 try:
2236 try:
2219 util.compressionengines[engine]
2237 util.compressionengines[engine]
2220 except KeyError:
2238 except KeyError:
2221 raise error.Abort(b'unknown compression engine: %s' % engine)
2239 raise error.Abort(b'unknown compression engine: %s' % engine)
2222 else:
2240 else:
2223 engines = []
2241 engines = []
2224 for e in util.compengines:
2242 for e in util.compengines:
2225 engine = util.compengines[e]
2243 engine = util.compengines[e]
2226 try:
2244 try:
2227 if engine.available():
2245 if engine.available():
2228 engine.revlogcompressor().compress(b'dummy')
2246 engine.revlogcompressor().compress(b'dummy')
2229 engines.append(e)
2247 engines.append(e)
2230 except NotImplementedError:
2248 except NotImplementedError:
2231 pass
2249 pass
2232
2250
2233 revs = list(rl.revs(startrev, len(rl) - 1))
2251 revs = list(rl.revs(startrev, len(rl) - 1))
2234
2252
2235 def rlfh(rl):
2253 def rlfh(rl):
2236 if rl._inline:
2254 if rl._inline:
2237 return getsvfs(repo)(rl.indexfile)
2255 return getsvfs(repo)(rl.indexfile)
2238 else:
2256 else:
2239 return getsvfs(repo)(rl.datafile)
2257 return getsvfs(repo)(rl.datafile)
2240
2258
2241 def doread():
2259 def doread():
2242 rl.clearcaches()
2260 rl.clearcaches()
2243 for rev in revs:
2261 for rev in revs:
2244 segmentforrevs(rev, rev)
2262 segmentforrevs(rev, rev)
2245
2263
2246 def doreadcachedfh():
2264 def doreadcachedfh():
2247 rl.clearcaches()
2265 rl.clearcaches()
2248 fh = rlfh(rl)
2266 fh = rlfh(rl)
2249 for rev in revs:
2267 for rev in revs:
2250 segmentforrevs(rev, rev, df=fh)
2268 segmentforrevs(rev, rev, df=fh)
2251
2269
2252 def doreadbatch():
2270 def doreadbatch():
2253 rl.clearcaches()
2271 rl.clearcaches()
2254 segmentforrevs(revs[0], revs[-1])
2272 segmentforrevs(revs[0], revs[-1])
2255
2273
2256 def doreadbatchcachedfh():
2274 def doreadbatchcachedfh():
2257 rl.clearcaches()
2275 rl.clearcaches()
2258 fh = rlfh(rl)
2276 fh = rlfh(rl)
2259 segmentforrevs(revs[0], revs[-1], df=fh)
2277 segmentforrevs(revs[0], revs[-1], df=fh)
2260
2278
2261 def dochunk():
2279 def dochunk():
2262 rl.clearcaches()
2280 rl.clearcaches()
2263 fh = rlfh(rl)
2281 fh = rlfh(rl)
2264 for rev in revs:
2282 for rev in revs:
2265 rl._chunk(rev, df=fh)
2283 rl._chunk(rev, df=fh)
2266
2284
2267 chunks = [None]
2285 chunks = [None]
2268
2286
2269 def dochunkbatch():
2287 def dochunkbatch():
2270 rl.clearcaches()
2288 rl.clearcaches()
2271 fh = rlfh(rl)
2289 fh = rlfh(rl)
2272 # Save chunks as a side-effect.
2290 # Save chunks as a side-effect.
2273 chunks[0] = rl._chunks(revs, df=fh)
2291 chunks[0] = rl._chunks(revs, df=fh)
2274
2292
2275 def docompress(compressor):
2293 def docompress(compressor):
2276 rl.clearcaches()
2294 rl.clearcaches()
2277
2295
2278 try:
2296 try:
2279 # Swap in the requested compression engine.
2297 # Swap in the requested compression engine.
2280 oldcompressor = rl._compressor
2298 oldcompressor = rl._compressor
2281 rl._compressor = compressor
2299 rl._compressor = compressor
2282 for chunk in chunks[0]:
2300 for chunk in chunks[0]:
2283 rl.compress(chunk)
2301 rl.compress(chunk)
2284 finally:
2302 finally:
2285 rl._compressor = oldcompressor
2303 rl._compressor = oldcompressor
2286
2304
2287 benches = [
2305 benches = [
2288 (lambda: doread(), b'read'),
2306 (lambda: doread(), b'read'),
2289 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2307 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2290 (lambda: doreadbatch(), b'read batch'),
2308 (lambda: doreadbatch(), b'read batch'),
2291 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2309 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2292 (lambda: dochunk(), b'chunk'),
2310 (lambda: dochunk(), b'chunk'),
2293 (lambda: dochunkbatch(), b'chunk batch'),
2311 (lambda: dochunkbatch(), b'chunk batch'),
2294 ]
2312 ]
2295
2313
2296 for engine in sorted(engines):
2314 for engine in sorted(engines):
2297 compressor = util.compengines[engine].revlogcompressor()
2315 compressor = util.compengines[engine].revlogcompressor()
2298 benches.append((functools.partial(docompress, compressor),
2316 benches.append((functools.partial(docompress, compressor),
2299 b'compress w/ %s' % engine))
2317 b'compress w/ %s' % engine))
2300
2318
2301 for fn, title in benches:
2319 for fn, title in benches:
2302 timer, fm = gettimer(ui, opts)
2320 timer, fm = gettimer(ui, opts)
2303 timer(fn, title=title)
2321 timer(fn, title=title)
2304 fm.end()
2322 fm.end()
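# Possible invocation (engine names are examples and must be available in the
# local build): `hg perfrevlogchunks -m -e 'zlib,zstd' --startrev 10000`.
# Without `-e`, every compression engine that reports itself as available is
# benchmarked, as selected by the loop over util.compengines above.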
2305
2323
2306 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2324 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2307 [(b'', b'cache', False, b'use caches instead of clearing')],
2325 [(b'', b'cache', False, b'use caches instead of clearing')],
2308 b'-c|-m|FILE REV')
2326 b'-c|-m|FILE REV')
2309 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2327 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2310 """Benchmark obtaining a revlog revision.
2328 """Benchmark obtaining a revlog revision.
2311
2329
2312 Obtaining a revlog revision consists of roughly the following steps:
2330 Obtaining a revlog revision consists of roughly the following steps:
2313
2331
2314 1. Compute the delta chain
2332 1. Compute the delta chain
2315 2. Slice the delta chain if applicable
2333 2. Slice the delta chain if applicable
2316 3. Obtain the raw chunks for that delta chain
2334 3. Obtain the raw chunks for that delta chain
2317 4. Decompress each raw chunk
2335 4. Decompress each raw chunk
2318 5. Apply binary patches to obtain fulltext
2336 5. Apply binary patches to obtain fulltext
2319 6. Verify hash of fulltext
2337 6. Verify hash of fulltext
2320
2338
2321 This command measures the time spent in each of these phases.
2339 This command measures the time spent in each of these phases.
2322 """
2340 """
2323 opts = _byteskwargs(opts)
2341 opts = _byteskwargs(opts)
2324
2342
2325 if opts.get(b'changelog') or opts.get(b'manifest'):
2343 if opts.get(b'changelog') or opts.get(b'manifest'):
2326 file_, rev = None, file_
2344 file_, rev = None, file_
2327 elif rev is None:
2345 elif rev is None:
2328 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2346 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2329
2347
2330 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2348 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2331
2349
2332 # _chunkraw was renamed to _getsegmentforrevs.
2350 # _chunkraw was renamed to _getsegmentforrevs.
2333 try:
2351 try:
2334 segmentforrevs = r._getsegmentforrevs
2352 segmentforrevs = r._getsegmentforrevs
2335 except AttributeError:
2353 except AttributeError:
2336 segmentforrevs = r._chunkraw
2354 segmentforrevs = r._chunkraw
2337
2355
2338 node = r.lookup(rev)
2356 node = r.lookup(rev)
2339 rev = r.rev(node)
2357 rev = r.rev(node)
2340
2358
2341 def getrawchunks(data, chain):
2359 def getrawchunks(data, chain):
2342 start = r.start
2360 start = r.start
2343 length = r.length
2361 length = r.length
2344 inline = r._inline
2362 inline = r._inline
2345 iosize = r._io.size
2363 iosize = r._io.size
2346 buffer = util.buffer
2364 buffer = util.buffer
2347
2365
2348 chunks = []
2366 chunks = []
2349 ladd = chunks.append
2367 ladd = chunks.append
2350 for idx, item in enumerate(chain):
2368 for idx, item in enumerate(chain):
2351 offset = start(item[0])
2369 offset = start(item[0])
2352 bits = data[idx]
2370 bits = data[idx]
2353 for rev in item:
2371 for rev in item:
2354 chunkstart = start(rev)
2372 chunkstart = start(rev)
2355 if inline:
2373 if inline:
2356 chunkstart += (rev + 1) * iosize
2374 chunkstart += (rev + 1) * iosize
2357 chunklength = length(rev)
2375 chunklength = length(rev)
2358 ladd(buffer(bits, chunkstart - offset, chunklength))
2376 ladd(buffer(bits, chunkstart - offset, chunklength))
2359
2377
2360 return chunks
2378 return chunks
2361
2379
2362 def dodeltachain(rev):
2380 def dodeltachain(rev):
2363 if not cache:
2381 if not cache:
2364 r.clearcaches()
2382 r.clearcaches()
2365 r._deltachain(rev)
2383 r._deltachain(rev)
2366
2384
2367 def doread(chain):
2385 def doread(chain):
2368 if not cache:
2386 if not cache:
2369 r.clearcaches()
2387 r.clearcaches()
2370 for item in slicedchain:
2388 for item in slicedchain:
2371 segmentforrevs(item[0], item[-1])
2389 segmentforrevs(item[0], item[-1])
2372
2390
2373 def doslice(r, chain, size):
2391 def doslice(r, chain, size):
2374 for s in slicechunk(r, chain, targetsize=size):
2392 for s in slicechunk(r, chain, targetsize=size):
2375 pass
2393 pass
2376
2394
2377 def dorawchunks(data, chain):
2395 def dorawchunks(data, chain):
2378 if not cache:
2396 if not cache:
2379 r.clearcaches()
2397 r.clearcaches()
2380 getrawchunks(data, chain)
2398 getrawchunks(data, chain)
2381
2399
2382 def dodecompress(chunks):
2400 def dodecompress(chunks):
2383 decomp = r.decompress
2401 decomp = r.decompress
2384 for chunk in chunks:
2402 for chunk in chunks:
2385 decomp(chunk)
2403 decomp(chunk)
2386
2404
2387 def dopatch(text, bins):
2405 def dopatch(text, bins):
2388 if not cache:
2406 if not cache:
2389 r.clearcaches()
2407 r.clearcaches()
2390 mdiff.patches(text, bins)
2408 mdiff.patches(text, bins)
2391
2409
2392 def dohash(text):
2410 def dohash(text):
2393 if not cache:
2411 if not cache:
2394 r.clearcaches()
2412 r.clearcaches()
2395 r.checkhash(text, node, rev=rev)
2413 r.checkhash(text, node, rev=rev)
2396
2414
2397 def dorevision():
2415 def dorevision():
2398 if not cache:
2416 if not cache:
2399 r.clearcaches()
2417 r.clearcaches()
2400 r.revision(node)
2418 r.revision(node)
2401
2419
2402 try:
2420 try:
2403 from mercurial.revlogutils.deltas import slicechunk
2421 from mercurial.revlogutils.deltas import slicechunk
2404 except ImportError:
2422 except ImportError:
2405 slicechunk = getattr(revlog, '_slicechunk', None)
2423 slicechunk = getattr(revlog, '_slicechunk', None)
2406
2424
2407 size = r.length(rev)
2425 size = r.length(rev)
2408 chain = r._deltachain(rev)[0]
2426 chain = r._deltachain(rev)[0]
2409 if not getattr(r, '_withsparseread', False):
2427 if not getattr(r, '_withsparseread', False):
2410 slicedchain = (chain,)
2428 slicedchain = (chain,)
2411 else:
2429 else:
2412 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2430 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2413 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2431 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2414 rawchunks = getrawchunks(data, slicedchain)
2432 rawchunks = getrawchunks(data, slicedchain)
2415 bins = r._chunks(chain)
2433 bins = r._chunks(chain)
2416 text = bytes(bins[0])
2434 text = bytes(bins[0])
2417 bins = bins[1:]
2435 bins = bins[1:]
2418 text = mdiff.patches(text, bins)
2436 text = mdiff.patches(text, bins)
2419
2437
2420 benches = [
2438 benches = [
2421 (lambda: dorevision(), b'full'),
2439 (lambda: dorevision(), b'full'),
2422 (lambda: dodeltachain(rev), b'deltachain'),
2440 (lambda: dodeltachain(rev), b'deltachain'),
2423 (lambda: doread(chain), b'read'),
2441 (lambda: doread(chain), b'read'),
2424 ]
2442 ]
2425
2443
2426 if getattr(r, '_withsparseread', False):
2444 if getattr(r, '_withsparseread', False):
2427 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2445 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2428 benches.append(slicing)
2446 benches.append(slicing)
2429
2447
2430 benches.extend([
2448 benches.extend([
2431 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2449 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2432 (lambda: dodecompress(rawchunks), b'decompress'),
2450 (lambda: dodecompress(rawchunks), b'decompress'),
2433 (lambda: dopatch(text, bins), b'patch'),
2451 (lambda: dopatch(text, bins), b'patch'),
2434 (lambda: dohash(text), b'hash'),
2452 (lambda: dohash(text), b'hash'),
2435 ])
2453 ])
2436
2454
2437 timer, fm = gettimer(ui, opts)
2455 timer, fm = gettimer(ui, opts)
2438 for fn, title in benches:
2456 for fn, title in benches:
2439 timer(fn, title=title)
2457 timer(fn, title=title)
2440 fm.end()
2458 fm.end()
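# The benches above roughly follow the six steps from the docstring:
# 'deltachain' is step 1, 'slice-sparse-chain' step 2 (sparse read only),
# 'read' and 'rawchunks' step 3, 'decompress' step 4, 'patch' step 5 and
# 'hash' step 6, while 'full' times r.revision() end to end. A typical call
# might be `hg perfrevlogrevision -m 10000` (the revision number is only an
# example).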
2441
2459
2442 @command(b'perfrevset',
2460 @command(b'perfrevset',
2443 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2461 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2444 (b'', b'contexts', False, b'obtain changectx for each revision')]
2462 (b'', b'contexts', False, b'obtain changectx for each revision')]
2445 + formatteropts, b"REVSET")
2463 + formatteropts, b"REVSET")
2446 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2464 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2447 """benchmark the execution time of a revset
2465 """benchmark the execution time of a revset
2448
2466
2449 Use the --clear option if you need to evaluate the impact of building the
2467 Use the --clear option if you need to evaluate the impact of building the
2450 volatile revision set caches on the revset execution. Volatile caches hold
2468 volatile revision set caches on the revset execution. Volatile caches hold
2451 filtering and obsolescence related data."""
2469 filtering and obsolescence related data."""
2452 opts = _byteskwargs(opts)
2470 opts = _byteskwargs(opts)
2453
2471
2454 timer, fm = gettimer(ui, opts)
2472 timer, fm = gettimer(ui, opts)
2455 def d():
2473 def d():
2456 if clear:
2474 if clear:
2457 repo.invalidatevolatilesets()
2475 repo.invalidatevolatilesets()
2458 if contexts:
2476 if contexts:
2459 for ctx in repo.set(expr): pass
2477 for ctx in repo.set(expr): pass
2460 else:
2478 else:
2461 for r in repo.revs(expr): pass
2479 for r in repo.revs(expr): pass
2462 timer(d)
2480 timer(d)
2463 fm.end()
2481 fm.end()
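# Examples (any valid revset works; these are just common ones):
#     $ hg perfrevset '::tip' --contexts
#     $ hg perfrevset 'draft() and not obsolete()' --clear
# `--contexts` makes each run build a changectx per matched revision, while
# `--clear` drops the volatile sets before every evaluation.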
2464
2482
2465 @command(b'perfvolatilesets',
2483 @command(b'perfvolatilesets',
2466 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2484 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2467 ] + formatteropts)
2485 ] + formatteropts)
2468 def perfvolatilesets(ui, repo, *names, **opts):
2486 def perfvolatilesets(ui, repo, *names, **opts):
2469 """benchmark the computation of various volatile set
2487 """benchmark the computation of various volatile set
2470
2488
2471 Volatile sets compute elements related to filtering and obsolescence."""
2489 Volatile sets compute elements related to filtering and obsolescence."""
2472 opts = _byteskwargs(opts)
2490 opts = _byteskwargs(opts)
2473 timer, fm = gettimer(ui, opts)
2491 timer, fm = gettimer(ui, opts)
2474 repo = repo.unfiltered()
2492 repo = repo.unfiltered()
2475
2493
2476 def getobs(name):
2494 def getobs(name):
2477 def d():
2495 def d():
2478 repo.invalidatevolatilesets()
2496 repo.invalidatevolatilesets()
2479 if opts[b'clear_obsstore']:
2497 if opts[b'clear_obsstore']:
2480 clearfilecache(repo, b'obsstore')
2498 clearfilecache(repo, b'obsstore')
2481 obsolete.getrevs(repo, name)
2499 obsolete.getrevs(repo, name)
2482 return d
2500 return d
2483
2501
2484 allobs = sorted(obsolete.cachefuncs)
2502 allobs = sorted(obsolete.cachefuncs)
2485 if names:
2503 if names:
2486 allobs = [n for n in allobs if n in names]
2504 allobs = [n for n in allobs if n in names]
2487
2505
2488 for name in allobs:
2506 for name in allobs:
2489 timer(getobs(name), title=name)
2507 timer(getobs(name), title=name)
2490
2508
2491 def getfiltered(name):
2509 def getfiltered(name):
2492 def d():
2510 def d():
2493 repo.invalidatevolatilesets()
2511 repo.invalidatevolatilesets()
2494 if opts[b'clear_obsstore']:
2512 if opts[b'clear_obsstore']:
2495 clearfilecache(repo, b'obsstore')
2513 clearfilecache(repo, b'obsstore')
2496 repoview.filterrevs(repo, name)
2514 repoview.filterrevs(repo, name)
2497 return d
2515 return d
2498
2516
2499 allfilter = sorted(repoview.filtertable)
2517 allfilter = sorted(repoview.filtertable)
2500 if names:
2518 if names:
2501 allfilter = [n for n in allfilter if n in names]
2519 allfilter = [n for n in allfilter if n in names]
2502
2520
2503 for name in allfilter:
2521 for name in allfilter:
2504 timer(getfiltered(name), title=name)
2522 timer(getfiltered(name), title=name)
2505 fm.end()
2523 fm.end()
2506
2524
2507 @command(b'perfbranchmap',
2525 @command(b'perfbranchmap',
2508 [(b'f', b'full', False,
2526 [(b'f', b'full', False,
2509 b'Includes build time of subset'),
2527 b'Includes build time of subset'),
2510 (b'', b'clear-revbranch', False,
2528 (b'', b'clear-revbranch', False,
2511 b'purge the revbranch cache between computation'),
2529 b'purge the revbranch cache between computation'),
2512 ] + formatteropts)
2530 ] + formatteropts)
2513 def perfbranchmap(ui, repo, *filternames, **opts):
2531 def perfbranchmap(ui, repo, *filternames, **opts):
2514 """benchmark the update of a branchmap
2532 """benchmark the update of a branchmap
2515
2533
2516 This benchmarks the full repo.branchmap() call with read and write disabled
2534 This benchmarks the full repo.branchmap() call with read and write disabled
2517 """
2535 """
2518 opts = _byteskwargs(opts)
2536 opts = _byteskwargs(opts)
2519 full = opts.get(b"full", False)
2537 full = opts.get(b"full", False)
2520 clear_revbranch = opts.get(b"clear_revbranch", False)
2538 clear_revbranch = opts.get(b"clear_revbranch", False)
2521 timer, fm = gettimer(ui, opts)
2539 timer, fm = gettimer(ui, opts)
2522 def getbranchmap(filtername):
2540 def getbranchmap(filtername):
2523 """generate a benchmark function for the filtername"""
2541 """generate a benchmark function for the filtername"""
2524 if filtername is None:
2542 if filtername is None:
2525 view = repo
2543 view = repo
2526 else:
2544 else:
2527 view = repo.filtered(filtername)
2545 view = repo.filtered(filtername)
2528 if util.safehasattr(view._branchcaches, '_per_filter'):
2546 if util.safehasattr(view._branchcaches, '_per_filter'):
2529 filtered = view._branchcaches._per_filter
2547 filtered = view._branchcaches._per_filter
2530 else:
2548 else:
2531 # older versions
2549 # older versions
2532 filtered = view._branchcaches
2550 filtered = view._branchcaches
2533 def d():
2551 def d():
2534 if clear_revbranch:
2552 if clear_revbranch:
2535 repo.revbranchcache()._clear()
2553 repo.revbranchcache()._clear()
2536 if full:
2554 if full:
2537 view._branchcaches.clear()
2555 view._branchcaches.clear()
2538 else:
2556 else:
2539 filtered.pop(filtername, None)
2557 filtered.pop(filtername, None)
2540 view.branchmap()
2558 view.branchmap()
2541 return d
2559 return d
2542 # order the filters from smaller subsets to bigger ones
2560 # order the filters from smaller subsets to bigger ones
2543 possiblefilters = set(repoview.filtertable)
2561 possiblefilters = set(repoview.filtertable)
2544 if filternames:
2562 if filternames:
2545 possiblefilters &= set(filternames)
2563 possiblefilters &= set(filternames)
2546 subsettable = getbranchmapsubsettable()
2564 subsettable = getbranchmapsubsettable()
2547 allfilters = []
2565 allfilters = []
2548 while possiblefilters:
2566 while possiblefilters:
2549 for name in possiblefilters:
2567 for name in possiblefilters:
2550 subset = subsettable.get(name)
2568 subset = subsettable.get(name)
2551 if subset not in possiblefilters:
2569 if subset not in possiblefilters:
2552 break
2570 break
2553 else:
2571 else:
2554 assert False, b'subset cycle %s!' % possiblefilters
2572 assert False, b'subset cycle %s!' % possiblefilters
2555 allfilters.append(name)
2573 allfilters.append(name)
2556 possiblefilters.remove(name)
2574 possiblefilters.remove(name)
2557
2575
2558 # warm the cache
2576 # warm the cache
2559 if not full:
2577 if not full:
2560 for name in allfilters:
2578 for name in allfilters:
2561 repo.filtered(name).branchmap()
2579 repo.filtered(name).branchmap()
2562 if not filternames or b'unfiltered' in filternames:
2580 if not filternames or b'unfiltered' in filternames:
2563 # add unfiltered
2581 # add unfiltered
2564 allfilters.append(None)
2582 allfilters.append(None)
2565
2583
2566 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2584 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2567 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2585 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2568 branchcacheread.set(classmethod(lambda *args: None))
2586 branchcacheread.set(classmethod(lambda *args: None))
2569 else:
2587 else:
2570 # older versions
2588 # older versions
2571 branchcacheread = safeattrsetter(branchmap, b'read')
2589 branchcacheread = safeattrsetter(branchmap, b'read')
2572 branchcacheread.set(lambda *args: None)
2590 branchcacheread.set(lambda *args: None)
2573 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2591 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2574 branchcachewrite.set(lambda *args: None)
2592 branchcachewrite.set(lambda *args: None)
2575 try:
2593 try:
2576 for name in allfilters:
2594 for name in allfilters:
2577 printname = name
2595 printname = name
2578 if name is None:
2596 if name is None:
2579 printname = b'unfiltered'
2597 printname = b'unfiltered'
2580 timer(getbranchmap(name), title=str(printname))
2598 timer(getbranchmap(name), title=str(printname))
2581 finally:
2599 finally:
2582 branchcacheread.restore()
2600 branchcacheread.restore()
2583 branchcachewrite.restore()
2601 branchcachewrite.restore()
2584 fm.end()
2602 fm.end()
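# Example: `hg perfbranchmap visible served` restricts the benchmark to those
# two repoview filters (any name given must exist in repoview.filtertable);
# with no argument every known filter plus the unfiltered view is timed, in
# the smaller-subset-first order computed by the while loop above.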
2585
2603
2586 @command(b'perfbranchmapupdate', [
2604 @command(b'perfbranchmapupdate', [
2587 (b'', b'base', [], b'subset of revisions to start from'),
2605 (b'', b'base', [], b'subset of revisions to start from'),
2588 (b'', b'target', [], b'subset of revisions to end with'),
2606 (b'', b'target', [], b'subset of revisions to end with'),
2589 (b'', b'clear-caches', False, b'clear caches between each run')
2607 (b'', b'clear-caches', False, b'clear caches between each run')
2590 ] + formatteropts)
2608 ] + formatteropts)
2591 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2609 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2592 """benchmark branchmap update from for <base> revs to <target> revs
2610 """benchmark branchmap update from for <base> revs to <target> revs
2593
2611
2594 If `--clear-caches` is passed, the following items will be reset before
2612 If `--clear-caches` is passed, the following items will be reset before
2595 each update:
2613 each update:
2596 * the changelog instance and associated indexes
2614 * the changelog instance and associated indexes
2597 * the rev-branch-cache instance
2615 * the rev-branch-cache instance
2598
2616
2599 Examples:
2617 Examples:
2600
2618
2601 # update for the one last revision
2619 # update for the one last revision
2602 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2620 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2603
2621
2604 # update for a change coming with a new branch
2622 # update for a change coming with a new branch
2605 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2623 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2606 """
2624 """
2607 from mercurial import branchmap
2625 from mercurial import branchmap
2608 from mercurial import repoview
2626 from mercurial import repoview
2609 opts = _byteskwargs(opts)
2627 opts = _byteskwargs(opts)
2610 timer, fm = gettimer(ui, opts)
2628 timer, fm = gettimer(ui, opts)
2611 clearcaches = opts[b'clear_caches']
2629 clearcaches = opts[b'clear_caches']
2612 unfi = repo.unfiltered()
2630 unfi = repo.unfiltered()
2613 x = [None] # used to pass data between closure
2631 x = [None] # used to pass data between closure
2614
2632
2615 # we use a `list` here to avoid possible side effect from smartset
2633 # we use a `list` here to avoid possible side effect from smartset
2616 baserevs = list(scmutil.revrange(repo, base))
2634 baserevs = list(scmutil.revrange(repo, base))
2617 targetrevs = list(scmutil.revrange(repo, target))
2635 targetrevs = list(scmutil.revrange(repo, target))
2618 if not baserevs:
2636 if not baserevs:
2619 raise error.Abort(b'no revisions selected for --base')
2637 raise error.Abort(b'no revisions selected for --base')
2620 if not targetrevs:
2638 if not targetrevs:
2621 raise error.Abort(b'no revisions selected for --target')
2639 raise error.Abort(b'no revisions selected for --target')
2622
2640
2623 # make sure the target branchmap also contains the one in the base
2641 # make sure the target branchmap also contains the one in the base
2624 targetrevs = list(set(baserevs) | set(targetrevs))
2642 targetrevs = list(set(baserevs) | set(targetrevs))
2625 targetrevs.sort()
2643 targetrevs.sort()
2626
2644
2627 cl = repo.changelog
2645 cl = repo.changelog
2628 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2646 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2629 allbaserevs.sort()
2647 allbaserevs.sort()
2630 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2648 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2631
2649
2632 newrevs = list(alltargetrevs.difference(allbaserevs))
2650 newrevs = list(alltargetrevs.difference(allbaserevs))
2633 newrevs.sort()
2651 newrevs.sort()
2634
2652
2635 allrevs = frozenset(unfi.changelog.revs())
2653 allrevs = frozenset(unfi.changelog.revs())
2636 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2654 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2637 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2655 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2638
2656
2639 def basefilter(repo, visibilityexceptions=None):
2657 def basefilter(repo, visibilityexceptions=None):
2640 return basefilterrevs
2658 return basefilterrevs
2641
2659
2642 def targetfilter(repo, visibilityexceptions=None):
2660 def targetfilter(repo, visibilityexceptions=None):
2643 return targetfilterrevs
2661 return targetfilterrevs
2644
2662
2645 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2663 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2646 ui.status(msg % (len(allbaserevs), len(newrevs)))
2664 ui.status(msg % (len(allbaserevs), len(newrevs)))
2647 if targetfilterrevs:
2665 if targetfilterrevs:
2648 msg = b'(%d revisions still filtered)\n'
2666 msg = b'(%d revisions still filtered)\n'
2649 ui.status(msg % len(targetfilterrevs))
2667 ui.status(msg % len(targetfilterrevs))
2650
2668
2651 try:
2669 try:
2652 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2670 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2653 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2671 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2654
2672
2655 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2673 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2656 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2674 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2657
2675
2658 # try to find an existing branchmap to reuse
2676 # try to find an existing branchmap to reuse
2659 subsettable = getbranchmapsubsettable()
2677 subsettable = getbranchmapsubsettable()
2660 candidatefilter = subsettable.get(None)
2678 candidatefilter = subsettable.get(None)
2661 while candidatefilter is not None:
2679 while candidatefilter is not None:
2662 candidatebm = repo.filtered(candidatefilter).branchmap()
2680 candidatebm = repo.filtered(candidatefilter).branchmap()
2663 if candidatebm.validfor(baserepo):
2681 if candidatebm.validfor(baserepo):
2664 filtered = repoview.filterrevs(repo, candidatefilter)
2682 filtered = repoview.filterrevs(repo, candidatefilter)
2665 missing = [r for r in allbaserevs if r in filtered]
2683 missing = [r for r in allbaserevs if r in filtered]
2666 base = candidatebm.copy()
2684 base = candidatebm.copy()
2667 base.update(baserepo, missing)
2685 base.update(baserepo, missing)
2668 break
2686 break
2669 candidatefilter = subsettable.get(candidatefilter)
2687 candidatefilter = subsettable.get(candidatefilter)
2670 else:
2688 else:
2671 # no suitable subset was found
2689 # no suitable subset was found
2672 base = branchmap.branchcache()
2690 base = branchmap.branchcache()
2673 base.update(baserepo, allbaserevs)
2691 base.update(baserepo, allbaserevs)
2674
2692
2675 def setup():
2693 def setup():
2676 x[0] = base.copy()
2694 x[0] = base.copy()
2677 if clearcaches:
2695 if clearcaches:
2678 unfi._revbranchcache = None
2696 unfi._revbranchcache = None
2679 clearchangelog(repo)
2697 clearchangelog(repo)
2680
2698
2681 def bench():
2699 def bench():
2682 x[0].update(targetrepo, newrevs)
2700 x[0].update(targetrepo, newrevs)
2683
2701
2684 timer(bench, setup=setup)
2702 timer(bench, setup=setup)
2685 fm.end()
2703 fm.end()
2686 finally:
2704 finally:
2687 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2705 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2688 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2706 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
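# Note on perfbranchmapupdate above: the two throwaway filters registered in
# repoview.filtertable expose a "base" and a "target" repoview, so bench()
# times only the incremental branchmap update from the base revisions to the
# new ones; the finally clause ensures the temporary filters never leak into
# later commands.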
2689
2707
2690 @command(b'perfbranchmapload', [
2708 @command(b'perfbranchmapload', [
2691 (b'f', b'filter', b'', b'Specify repoview filter'),
2709 (b'f', b'filter', b'', b'Specify repoview filter'),
2692 (b'', b'list', False, b'List branchmap filter caches'),
2710 (b'', b'list', False, b'List branchmap filter caches'),
2693 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2711 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2695 ] + formatteropts)
2713 ] + formatteropts)
2696 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2714 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2697 """benchmark reading the branchmap"""
2715 """benchmark reading the branchmap"""
2698 opts = _byteskwargs(opts)
2716 opts = _byteskwargs(opts)
2699 clearrevlogs = opts[b'clear_revlogs']
2717 clearrevlogs = opts[b'clear_revlogs']
2700
2718
2701 if list:
2719 if list:
2702 for name, kind, st in repo.cachevfs.readdir(stat=True):
2720 for name, kind, st in repo.cachevfs.readdir(stat=True):
2703 if name.startswith(b'branch2'):
2721 if name.startswith(b'branch2'):
2704 filtername = name.partition(b'-')[2] or b'unfiltered'
2722 filtername = name.partition(b'-')[2] or b'unfiltered'
2705 ui.status(b'%s - %s\n'
2723 ui.status(b'%s - %s\n'
2706 % (filtername, util.bytecount(st.st_size)))
2724 % (filtername, util.bytecount(st.st_size)))
2707 return
2725 return
2708 if not filter:
2726 if not filter:
2709 filter = None
2727 filter = None
2710 subsettable = getbranchmapsubsettable()
2728 subsettable = getbranchmapsubsettable()
2711 if filter is None:
2729 if filter is None:
2712 repo = repo.unfiltered()
2730 repo = repo.unfiltered()
2713 else:
2731 else:
2714 repo = repoview.repoview(repo, filter)
2732 repo = repoview.repoview(repo, filter)
2715
2733
2716 repo.branchmap() # make sure we have a relevant, up to date branchmap
2734 repo.branchmap() # make sure we have a relevant, up to date branchmap
2717
2735
2718 try:
2736 try:
2719 fromfile = branchmap.branchcache.fromfile
2737 fromfile = branchmap.branchcache.fromfile
2720 except AttributeError:
2738 except AttributeError:
2721 # older versions
2739 # older versions
2722 fromfile = branchmap.read
2740 fromfile = branchmap.read
2723
2741
2724 currentfilter = filter
2742 currentfilter = filter
2725 # try once without timer, the filter may not be cached
2743 # try once without timer, the filter may not be cached
2726 while fromfile(repo) is None:
2744 while fromfile(repo) is None:
2727 currentfilter = subsettable.get(currentfilter)
2745 currentfilter = subsettable.get(currentfilter)
2728 if currentfilter is None:
2746 if currentfilter is None:
2729 raise error.Abort(b'No branchmap cached for %s repo'
2747 raise error.Abort(b'No branchmap cached for %s repo'
2730 % (filter or b'unfiltered'))
2748 % (filter or b'unfiltered'))
2731 repo = repo.filtered(currentfilter)
2749 repo = repo.filtered(currentfilter)
2732 timer, fm = gettimer(ui, opts)
2750 timer, fm = gettimer(ui, opts)
2733 def setup():
2751 def setup():
2734 if clearrevlogs:
2752 if clearrevlogs:
2735 clearchangelog(repo)
2753 clearchangelog(repo)
2736 def bench():
2754 def bench():
2737 fromfile(repo)
2755 fromfile(repo)
2738 timer(bench, setup=setup)
2756 timer(bench, setup=setup)
2739 fm.end()
2757 fm.end()
2740
2758
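For reference, the `--list` branch above derives the filter name from the cache file name: `branch2` belongs to the unfiltered repo, `branch2-served` to the `served` filter, and so on. A small standalone illustration of that mapping, reading a directory with the standard library instead of the repo's `cachevfs` (the path and helper name are hypothetical):

    import os

    def branchmap_caches(cachedir='.hg/cache'):
        """Map branch2* cache files to the repoview filter they belong to."""
        out = {}
        for name in os.listdir(cachedir):
            if name.startswith('branch2'):
                filtername = name.partition('-')[2] or 'unfiltered'
                out[filtername] = os.path.getsize(os.path.join(cachedir, name))
        return out

    # e.g. branchmap_caches() might return {'unfiltered': 261, 'served': 261}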
2741 @command(b'perfloadmarkers')
2759 @command(b'perfloadmarkers')
2742 def perfloadmarkers(ui, repo):
2760 def perfloadmarkers(ui, repo):
2743 """benchmark the time to parse the on-disk markers for a repo
2761 """benchmark the time to parse the on-disk markers for a repo
2744
2762
2745 Result is the number of markers in the repo."""
2763 Result is the number of markers in the repo."""
2746 timer, fm = gettimer(ui)
2764 timer, fm = gettimer(ui)
2747 svfs = getsvfs(repo)
2765 svfs = getsvfs(repo)
2748 timer(lambda: len(obsolete.obsstore(svfs)))
2766 timer(lambda: len(obsolete.obsstore(svfs)))
2749 fm.end()
2767 fm.end()
2750
2768
2751 @command(b'perflrucachedict', formatteropts +
2769 @command(b'perflrucachedict', formatteropts +
2752 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2770 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2753 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2771 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2754 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2772 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2755 (b'', b'size', 4, b'size of cache'),
2773 (b'', b'size', 4, b'size of cache'),
2756 (b'', b'gets', 10000, b'number of key lookups'),
2774 (b'', b'gets', 10000, b'number of key lookups'),
2757 (b'', b'sets', 10000, b'number of key sets'),
2775 (b'', b'sets', 10000, b'number of key sets'),
2758 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2776 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2759 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2777 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2760 norepo=True)
2778 norepo=True)
2761 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2779 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2762 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2780 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2763 opts = _byteskwargs(opts)
2781 opts = _byteskwargs(opts)
2764
2782
2765 def doinit():
2783 def doinit():
2766 for i in _xrange(10000):
2784 for i in _xrange(10000):
2767 util.lrucachedict(size)
2785 util.lrucachedict(size)
2768
2786
2769 costrange = list(range(mincost, maxcost + 1))
2787 costrange = list(range(mincost, maxcost + 1))
2770
2788
2771 values = []
2789 values = []
2772 for i in _xrange(size):
2790 for i in _xrange(size):
2773 values.append(random.randint(0, _maxint))
2791 values.append(random.randint(0, _maxint))
2774
2792
2775 # Get mode fills the cache and tests raw lookup performance with no
2793 # Get mode fills the cache and tests raw lookup performance with no
2776 # eviction.
2794 # eviction.
2777 getseq = []
2795 getseq = []
2778 for i in _xrange(gets):
2796 for i in _xrange(gets):
2779 getseq.append(random.choice(values))
2797 getseq.append(random.choice(values))
2780
2798
2781 def dogets():
2799 def dogets():
2782 d = util.lrucachedict(size)
2800 d = util.lrucachedict(size)
2783 for v in values:
2801 for v in values:
2784 d[v] = v
2802 d[v] = v
2785 for key in getseq:
2803 for key in getseq:
2786 value = d[key]
2804 value = d[key]
2787 value # silence pyflakes warning
2805 value # silence pyflakes warning
2788
2806
2789 def dogetscost():
2807 def dogetscost():
2790 d = util.lrucachedict(size, maxcost=costlimit)
2808 d = util.lrucachedict(size, maxcost=costlimit)
2791 for i, v in enumerate(values):
2809 for i, v in enumerate(values):
2792 d.insert(v, v, cost=costs[i])
2810 d.insert(v, v, cost=costs[i])
2793 for key in getseq:
2811 for key in getseq:
2794 try:
2812 try:
2795 value = d[key]
2813 value = d[key]
2796 value # silence pyflakes warning
2814 value # silence pyflakes warning
2797 except KeyError:
2815 except KeyError:
2798 pass
2816 pass
2799
2817
2800 # Set mode tests insertion speed with cache eviction.
2818 # Set mode tests insertion speed with cache eviction.
2801 setseq = []
2819 setseq = []
2802 costs = []
2820 costs = []
2803 for i in _xrange(sets):
2821 for i in _xrange(sets):
2804 setseq.append(random.randint(0, _maxint))
2822 setseq.append(random.randint(0, _maxint))
2805 costs.append(random.choice(costrange))
2823 costs.append(random.choice(costrange))
2806
2824
2807 def doinserts():
2825 def doinserts():
2808 d = util.lrucachedict(size)
2826 d = util.lrucachedict(size)
2809 for v in setseq:
2827 for v in setseq:
2810 d.insert(v, v)
2828 d.insert(v, v)
2811
2829
2812 def doinsertscost():
2830 def doinsertscost():
2813 d = util.lrucachedict(size, maxcost=costlimit)
2831 d = util.lrucachedict(size, maxcost=costlimit)
2814 for i, v in enumerate(setseq):
2832 for i, v in enumerate(setseq):
2815 d.insert(v, v, cost=costs[i])
2833 d.insert(v, v, cost=costs[i])
2816
2834
2817 def dosets():
2835 def dosets():
2818 d = util.lrucachedict(size)
2836 d = util.lrucachedict(size)
2819 for v in setseq:
2837 for v in setseq:
2820 d[v] = v
2838 d[v] = v
2821
2839
2822 # Mixed mode randomly performs gets and sets with eviction.
2840 # Mixed mode randomly performs gets and sets with eviction.
2823 mixedops = []
2841 mixedops = []
2824 for i in _xrange(mixed):
2842 for i in _xrange(mixed):
2825 r = random.randint(0, 100)
2843 r = random.randint(0, 100)
2826 if r < mixedgetfreq:
2844 if r < mixedgetfreq:
2827 op = 0
2845 op = 0
2828 else:
2846 else:
2829 op = 1
2847 op = 1
2830
2848
2831 mixedops.append((op,
2849 mixedops.append((op,
2832 random.randint(0, size * 2),
2850 random.randint(0, size * 2),
2833 random.choice(costrange)))
2851 random.choice(costrange)))
2834
2852
2835 def domixed():
2853 def domixed():
2836 d = util.lrucachedict(size)
2854 d = util.lrucachedict(size)
2837
2855
2838 for op, v, cost in mixedops:
2856 for op, v, cost in mixedops:
2839 if op == 0:
2857 if op == 0:
2840 try:
2858 try:
2841 d[v]
2859 d[v]
2842 except KeyError:
2860 except KeyError:
2843 pass
2861 pass
2844 else:
2862 else:
2845 d[v] = v
2863 d[v] = v
2846
2864
2847 def domixedcost():
2865 def domixedcost():
2848 d = util.lrucachedict(size, maxcost=costlimit)
2866 d = util.lrucachedict(size, maxcost=costlimit)
2849
2867
2850 for op, v, cost in mixedops:
2868 for op, v, cost in mixedops:
2851 if op == 0:
2869 if op == 0:
2852 try:
2870 try:
2853 d[v]
2871 d[v]
2854 except KeyError:
2872 except KeyError:
2855 pass
2873 pass
2856 else:
2874 else:
2857 d.insert(v, v, cost=cost)
2875 d.insert(v, v, cost=cost)
2858
2876
2859 benches = [
2877 benches = [
2860 (doinit, b'init'),
2878 (doinit, b'init'),
2861 ]
2879 ]
2862
2880
2863 if costlimit:
2881 if costlimit:
2864 benches.extend([
2882 benches.extend([
2865 (dogetscost, b'gets w/ cost limit'),
2883 (dogetscost, b'gets w/ cost limit'),
2866 (doinsertscost, b'inserts w/ cost limit'),
2884 (doinsertscost, b'inserts w/ cost limit'),
2867 (domixedcost, b'mixed w/ cost limit'),
2885 (domixedcost, b'mixed w/ cost limit'),
2868 ])
2886 ])
2869 else:
2887 else:
2870 benches.extend([
2888 benches.extend([
2871 (dogets, b'gets'),
2889 (dogets, b'gets'),
2872 (doinserts, b'inserts'),
2890 (doinserts, b'inserts'),
2873 (dosets, b'sets'),
2891 (dosets, b'sets'),
2874 (domixed, b'mixed')
2892 (domixed, b'mixed')
2875 ])
2893 ])
2876
2894
2877 for fn, title in benches:
2895 for fn, title in benches:
2878 timer, fm = gettimer(ui, opts)
2896 timer, fm = gettimer(ui, opts)
2879 timer(fn, title=title)
2897 timer(fn, title=title)
2880 fm.end()
2898 fm.end()
2881
2899
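The workload builders above (init, gets, inserts, sets, mixed, plus their cost-limited variants) are what the timer compares. As a rough, self-contained illustration of those access patterns, the following runs the same shape of workload against a toy OrderedDict-based LRU rather than Mercurial's `util.lrucachedict` (capacity and key ranges are arbitrary):

    import random
    from collections import OrderedDict

    class ToyLRU:
        """Minimal fixed-capacity LRU mapping, for illustration only."""
        def __init__(self, capacity):
            self.capacity = capacity
            self.data = OrderedDict()

        def __getitem__(self, key):
            self.data.move_to_end(key)          # raises KeyError on a miss
            return self.data[key]

        def __setitem__(self, key, value):
            self.data[key] = value
            self.data.move_to_end(key)
            if len(self.data) > self.capacity:
                self.data.popitem(last=False)   # evict the least recently used

    size = 4
    cache = ToyLRU(size)
    values = [random.randint(0, 1 << 30) for _ in range(size)]

    # "gets": fill the cache, then look up known keys (no eviction).
    for v in values:
        cache[v] = v
    for key in (random.choice(values) for _ in range(1000)):
        cache[key]

    # "mixed": random lookups (which may miss) interleaved with inserts.
    for _ in range(1000):
        k = random.randint(0, size * 2)
        if random.randint(0, 100) < 50:
            try:
                cache[k]
            except KeyError:
                pass
        else:
            cache[k] = k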
2882 @command(b'perfwrite', formatteropts)
2900 @command(b'perfwrite', formatteropts)
2883 def perfwrite(ui, repo, **opts):
2901 def perfwrite(ui, repo, **opts):
2884 """microbenchmark ui.write
2902 """microbenchmark ui.write
2885 """
2903 """
2886 opts = _byteskwargs(opts)
2904 opts = _byteskwargs(opts)
2887
2905
2888 timer, fm = gettimer(ui, opts)
2906 timer, fm = gettimer(ui, opts)
2889 def write():
2907 def write():
2890 for i in range(100000):
2908 for i in range(100000):
2891 ui.write((b'Testing write performance\n'))
2909 ui.write((b'Testing write performance\n'))
2892 timer(write)
2910 timer(write)
2893 fm.end()
2911 fm.end()
2894
2912
2895 def uisetup(ui):
2913 def uisetup(ui):
2896 if (util.safehasattr(cmdutil, b'openrevlog') and
2914 if (util.safehasattr(cmdutil, b'openrevlog') and
2897 not util.safehasattr(commands, b'debugrevlogopts')):
2915 not util.safehasattr(commands, b'debugrevlogopts')):
2898 # for "historical portability":
2916 # for "historical portability":
2899 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2917 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2900 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2918 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2901 # openrevlog() should cause failure, because it has been
2919 # openrevlog() should cause failure, because it has been
2902 # available since 3.5 (or 49c583ca48c4).
2920 # available since 3.5 (or 49c583ca48c4).
2903 def openrevlog(orig, repo, cmd, file_, opts):
2921 def openrevlog(orig, repo, cmd, file_, opts):
2904 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2922 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2905 raise error.Abort(b"This version doesn't support --dir option",
2923 raise error.Abort(b"This version doesn't support --dir option",
2906 hint=b"use 3.5 or later")
2924 hint=b"use 3.5 or later")
2907 return orig(repo, cmd, file_, opts)
2925 return orig(repo, cmd, file_, opts)
2908 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2926 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2909
2927
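The `uisetup` hook above uses a wrap-and-guard pattern for historical portability: the wrapper receives the original callable first and refuses options the old version cannot honour before delegating. A generic, Mercurial-free sketch of that shape (all names below are stand-ins):

    def wrapfunction(container, name, wrapper):
        """Replace container.name with a wrapper that receives the original first."""
        orig = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        setattr(container, name, wrapped)

    class legacymodule:                     # hypothetical module being patched
        @staticmethod
        def openthing(path, opts):
            return (path, opts)

    def guarded(orig, path, opts):
        if opts.get('dir'):
            raise RuntimeError("this version doesn't support --dir")
        return orig(path, opts)

    wrapfunction(legacymodule, 'openthing', guarded)
    legacymodule.openthing('data/a.i', {})  # delegates to the original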
2910 @command(b'perfprogress', formatteropts + [
2928 @command(b'perfprogress', formatteropts + [
2911 (b'', b'topic', b'topic', b'topic for progress messages'),
2929 (b'', b'topic', b'topic', b'topic for progress messages'),
2912 (b'c', b'total', 1000000, b'total value we are progressing to'),
2930 (b'c', b'total', 1000000, b'total value we are progressing to'),
2913 ], norepo=True)
2931 ], norepo=True)
2914 def perfprogress(ui, topic=None, total=None, **opts):
2932 def perfprogress(ui, topic=None, total=None, **opts):
2915 """printing of progress bars"""
2933 """printing of progress bars"""
2916 opts = _byteskwargs(opts)
2934 opts = _byteskwargs(opts)
2917
2935
2918 timer, fm = gettimer(ui, opts)
2936 timer, fm = gettimer(ui, opts)
2919
2937
2920 def doprogress():
2938 def doprogress():
2921 with ui.makeprogress(topic, total=total) as progress:
2939 with ui.makeprogress(topic, total=total) as progress:
2922 for i in pycompat.xrange(total):
2940 for i in pycompat.xrange(total):
2923 progress.increment()
2941 progress.increment()
2924
2942
2925 timer(doprogress)
2943 timer(doprogress)
2926 fm.end()
2944 fm.end()
@@ -1,391 +1,393 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from <base> revs to <target>
96 benchmark branchmap update from <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 (no help text available)
108 (no help text available)
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate (no help text available)
110 perfdirstate (no help text available)
111 perfdirstatedirs
111 perfdirstatedirs
112 (no help text available)
112 (no help text available)
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 (no help text available)
114 (no help text available)
115 perfdirstatewrite
115 perfdirstatewrite
116 (no help text available)
116 (no help text available)
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-pathcopies
127 perfhelper-pathcopies
128 find statistics about potential parameters for the
128 find statistics about potential parameters for the
129 'perftracecopies'
129 'perftracecopies'
130 perfignore benchmark operation related to computing ignore
130 perfignore benchmark operation related to computing ignore
131 perfindex benchmark index creation time followed by a lookup
131 perfindex benchmark index creation time followed by a lookup
132 perflinelogedits
132 perflinelogedits
133 (no help text available)
133 (no help text available)
134 perfloadmarkers
134 perfloadmarkers
135 benchmark the time to parse the on-disk markers for a repo
135 benchmark the time to parse the on-disk markers for a repo
136 perflog (no help text available)
136 perflog (no help text available)
137 perflookup (no help text available)
137 perflookup (no help text available)
138 perflrucachedict
138 perflrucachedict
139 (no help text available)
139 (no help text available)
140 perfmanifest benchmark the time to read a manifest from disk and return a
140 perfmanifest benchmark the time to read a manifest from disk and return a
141 usable
141 usable
142 perfmergecalculate
142 perfmergecalculate
143 (no help text available)
143 (no help text available)
144 perfmergecopies
145 measure runtime of 'copies.mergecopies'
144 perfmoonwalk benchmark walking the changelog backwards
146 perfmoonwalk benchmark walking the changelog backwards
145 perfnodelookup
147 perfnodelookup
146 (no help text available)
148 (no help text available)
147 perfnodemap benchmark the time necessary to look up revision from a cold
149 perfnodemap benchmark the time necessary to look up revision from a cold
148 nodemap
150 nodemap
149 perfparents benchmark the time necessary to fetch one changeset's parents.
151 perfparents benchmark the time necessary to fetch one changeset's parents.
150 perfpathcopies
152 perfpathcopies
151 benchmark the copy tracing logic
153 benchmark the copy tracing logic
152 perfphases benchmark phasesets computation
154 perfphases benchmark phasesets computation
153 perfphasesremote
155 perfphasesremote
154 benchmark time needed to analyse phases of the remote server
156 benchmark time needed to analyse phases of the remote server
155 perfprogress printing of progress bars
157 perfprogress printing of progress bars
156 perfrawfiles (no help text available)
158 perfrawfiles (no help text available)
157 perfrevlogchunks
159 perfrevlogchunks
158 Benchmark operations on revlog chunks.
160 Benchmark operations on revlog chunks.
159 perfrevlogindex
161 perfrevlogindex
160 Benchmark operations against a revlog index.
162 Benchmark operations against a revlog index.
161 perfrevlogrevision
163 perfrevlogrevision
162 Benchmark obtaining a revlog revision.
164 Benchmark obtaining a revlog revision.
163 perfrevlogrevisions
165 perfrevlogrevisions
164 Benchmark reading a series of revisions from a revlog.
166 Benchmark reading a series of revisions from a revlog.
165 perfrevlogwrite
167 perfrevlogwrite
166 Benchmark writing a series of revisions to a revlog.
168 Benchmark writing a series of revisions to a revlog.
167 perfrevrange (no help text available)
169 perfrevrange (no help text available)
168 perfrevset benchmark the execution time of a revset
170 perfrevset benchmark the execution time of a revset
169 perfstartup (no help text available)
171 perfstartup (no help text available)
170 perfstatus (no help text available)
172 perfstatus (no help text available)
171 perftags (no help text available)
173 perftags (no help text available)
172 perftemplating
174 perftemplating
173 test the rendering time of a given template
175 test the rendering time of a given template
174 perfunidiff benchmark a unified diff between revisions
176 perfunidiff benchmark a unified diff between revisions
175 perfvolatilesets
177 perfvolatilesets
176 benchmark the computation of various volatile set
178 benchmark the computation of various volatile set
177 perfwalk (no help text available)
179 perfwalk (no help text available)
178 perfwrite microbenchmark ui.write
180 perfwrite microbenchmark ui.write
179
181
180 (use 'hg help -v perf' to show built-in aliases and global options)
182 (use 'hg help -v perf' to show built-in aliases and global options)
181 $ hg perfaddremove
183 $ hg perfaddremove
182 $ hg perfancestors
184 $ hg perfancestors
183 $ hg perfancestorset 2
185 $ hg perfancestorset 2
184 $ hg perfannotate a
186 $ hg perfannotate a
185 $ hg perfbdiff -c 1
187 $ hg perfbdiff -c 1
186 $ hg perfbdiff --alldata 1
188 $ hg perfbdiff --alldata 1
187 $ hg perfunidiff -c 1
189 $ hg perfunidiff -c 1
188 $ hg perfunidiff --alldata 1
190 $ hg perfunidiff --alldata 1
189 $ hg perfbookmarks
191 $ hg perfbookmarks
190 $ hg perfbranchmap
192 $ hg perfbranchmap
191 $ hg perfbranchmapload
193 $ hg perfbranchmapload
192 $ hg perfbranchmapupdate --base "not tip" --target "tip"
194 $ hg perfbranchmapupdate --base "not tip" --target "tip"
193 benchmark of branchmap with 3 revisions with 1 new ones
195 benchmark of branchmap with 3 revisions with 1 new ones
194 $ hg perfcca
196 $ hg perfcca
195 $ hg perfchangegroupchangelog
197 $ hg perfchangegroupchangelog
196 $ hg perfchangegroupchangelog --cgversion 01
198 $ hg perfchangegroupchangelog --cgversion 01
197 $ hg perfchangeset 2
199 $ hg perfchangeset 2
198 $ hg perfctxfiles 2
200 $ hg perfctxfiles 2
199 $ hg perfdiffwd
201 $ hg perfdiffwd
200 $ hg perfdirfoldmap
202 $ hg perfdirfoldmap
201 $ hg perfdirs
203 $ hg perfdirs
202 $ hg perfdirstate
204 $ hg perfdirstate
203 $ hg perfdirstatedirs
205 $ hg perfdirstatedirs
204 $ hg perfdirstatefoldmap
206 $ hg perfdirstatefoldmap
205 $ hg perfdirstatewrite
207 $ hg perfdirstatewrite
206 #if repofncache
208 #if repofncache
207 $ hg perffncacheencode
209 $ hg perffncacheencode
208 $ hg perffncacheload
210 $ hg perffncacheload
209 $ hg debugrebuildfncache
211 $ hg debugrebuildfncache
210 fncache already up to date
212 fncache already up to date
211 $ hg perffncachewrite
213 $ hg perffncachewrite
212 $ hg debugrebuildfncache
214 $ hg debugrebuildfncache
213 fncache already up to date
215 fncache already up to date
214 #endif
216 #endif
215 $ hg perfheads
217 $ hg perfheads
216 $ hg perfignore
218 $ hg perfignore
217 $ hg perfindex
219 $ hg perfindex
218 $ hg perflinelogedits -n 1
220 $ hg perflinelogedits -n 1
219 $ hg perfloadmarkers
221 $ hg perfloadmarkers
220 $ hg perflog
222 $ hg perflog
221 $ hg perflookup 2
223 $ hg perflookup 2
222 $ hg perflrucache
224 $ hg perflrucache
223 $ hg perfmanifest 2
225 $ hg perfmanifest 2
224 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
226 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
225 $ hg perfmanifest -m 44fe2c8352bb
227 $ hg perfmanifest -m 44fe2c8352bb
226 abort: manifest revision must be integer or full node
228 abort: manifest revision must be integer or full node
227 [255]
229 [255]
228 $ hg perfmergecalculate -r 3
230 $ hg perfmergecalculate -r 3
229 $ hg perfmoonwalk
231 $ hg perfmoonwalk
230 $ hg perfnodelookup 2
232 $ hg perfnodelookup 2
231 $ hg perfpathcopies 1 2
233 $ hg perfpathcopies 1 2
232 $ hg perfprogress --total 1000
234 $ hg perfprogress --total 1000
233 $ hg perfrawfiles 2
235 $ hg perfrawfiles 2
234 $ hg perfrevlogindex -c
236 $ hg perfrevlogindex -c
235 #if reporevlogstore
237 #if reporevlogstore
236 $ hg perfrevlogrevisions .hg/store/data/a.i
238 $ hg perfrevlogrevisions .hg/store/data/a.i
237 #endif
239 #endif
238 $ hg perfrevlogrevision -m 0
240 $ hg perfrevlogrevision -m 0
239 $ hg perfrevlogchunks -c
241 $ hg perfrevlogchunks -c
240 $ hg perfrevrange
242 $ hg perfrevrange
241 $ hg perfrevset 'all()'
243 $ hg perfrevset 'all()'
242 $ hg perfstartup
244 $ hg perfstartup
243 $ hg perfstatus
245 $ hg perfstatus
244 $ hg perftags
246 $ hg perftags
245 $ hg perftemplating
247 $ hg perftemplating
246 $ hg perfvolatilesets
248 $ hg perfvolatilesets
247 $ hg perfwalk
249 $ hg perfwalk
248 $ hg perfparents
250 $ hg perfparents
249 $ hg perfdiscovery -q .
251 $ hg perfdiscovery -q .
250
252
251 Test run control
253 Test run control
252 ----------------
254 ----------------
253
255
254 Simple single entry
256 Simple single entry
255
257
256 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
258 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
257 ! wall * comb * user * sys * (best of 15) (glob)
259 ! wall * comb * user * sys * (best of 15) (glob)
258
260
259 Multiple entries
261 Multiple entries
260
262
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
263 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
262 ! wall * comb * user * sys * (best of 5) (glob)
264 ! wall * comb * user * sys * (best of 5) (glob)
263
265
264 error cases are ignored
266 error cases are ignored
265
267
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
268 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
267 malformatted run limit entry, missing "-": 500
269 malformatted run limit entry, missing "-": 500
268 ! wall * comb * user * sys * (best of 5) (glob)
270 ! wall * comb * user * sys * (best of 5) (glob)
269 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
270 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
272 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
271 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
273 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
272 ! wall * comb * user * sys * (best of 5) (glob)
274 ! wall * comb * user * sys * (best of 5) (glob)
273 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
275 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
274 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
276 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
275 ! wall * comb * user * sys * (best of 5) (glob)
277 ! wall * comb * user * sys * (best of 5) (glob)
276
278
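The three rejected values above come from splitting each run-limit entry on '-' and converting the halves to a float (time budget) and an int (iteration count). A minimal sketch of that parsing which reproduces the same failure messages (it mirrors the behaviour exercised here, not the extension's exact code):

    def parse_run_limits(value):
        """Parse 'time-count' pairs such as '3.0-100, 10.0-3', skipping bad entries."""
        limits = []
        for item in value.split(','):
            entry = item.strip()
            parts = entry.split('-', 1)
            if len(parts) < 2:
                print('malformatted run limit entry, missing "-": %s' % entry)
                continue
            try:
                time_limit = float(parts[0])
            except ValueError as exc:
                print('malformatted run limit entry, %s: %s' % (exc, entry))
                continue
            try:
                run_limit = int(parts[1])
            except ValueError as exc:
                print('malformatted run limit entry, %s: %s' % (exc, entry))
                continue
            limits.append((time_limit, run_limit))
        return limits

    parse_run_limits('500, 0.000000001-5')    # warns about '500', keeps the valid pair
    parse_run_limits('12-aaaaaa')             # warns about the int conversion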
277 test actual output
279 test actual output
278 ------------------
280 ------------------
279
281
280 normal output:
282 normal output:
281
283
282 $ hg perfheads --config perf.stub=no
284 $ hg perfheads --config perf.stub=no
283 ! wall * comb * user * sys * (best of *) (glob)
285 ! wall * comb * user * sys * (best of *) (glob)
284
286
285 detailed output:
287 detailed output:
286
288
287 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
289 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
288 ! wall * comb * user * sys * (best of *) (glob)
290 ! wall * comb * user * sys * (best of *) (glob)
289 ! wall * comb * user * sys * (max of *) (glob)
291 ! wall * comb * user * sys * (max of *) (glob)
290 ! wall * comb * user * sys * (avg of *) (glob)
292 ! wall * comb * user * sys * (avg of *) (glob)
291 ! wall * comb * user * sys * (median of *) (glob)
293 ! wall * comb * user * sys * (median of *) (glob)
292
294
293 test json output
295 test json output
294 ----------------
296 ----------------
295
297
296 normal output:
298 normal output:
297
299
298 $ hg perfheads --template json --config perf.stub=no
300 $ hg perfheads --template json --config perf.stub=no
299 [
301 [
300 {
302 {
301 "comb": *, (glob)
303 "comb": *, (glob)
302 "count": *, (glob)
304 "count": *, (glob)
303 "sys": *, (glob)
305 "sys": *, (glob)
304 "user": *, (glob)
306 "user": *, (glob)
305 "wall": * (glob)
307 "wall": * (glob)
306 }
308 }
307 ]
309 ]
308
310
309 detailed output:
311 detailed output:
310
312
311 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
313 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
312 [
314 [
313 {
315 {
314 "avg.comb": *, (glob)
316 "avg.comb": *, (glob)
315 "avg.count": *, (glob)
317 "avg.count": *, (glob)
316 "avg.sys": *, (glob)
318 "avg.sys": *, (glob)
317 "avg.user": *, (glob)
319 "avg.user": *, (glob)
318 "avg.wall": *, (glob)
320 "avg.wall": *, (glob)
319 "comb": *, (glob)
321 "comb": *, (glob)
320 "count": *, (glob)
322 "count": *, (glob)
321 "max.comb": *, (glob)
323 "max.comb": *, (glob)
322 "max.count": *, (glob)
324 "max.count": *, (glob)
323 "max.sys": *, (glob)
325 "max.sys": *, (glob)
324 "max.user": *, (glob)
326 "max.user": *, (glob)
325 "max.wall": *, (glob)
327 "max.wall": *, (glob)
326 "median.comb": *, (glob)
328 "median.comb": *, (glob)
327 "median.count": *, (glob)
329 "median.count": *, (glob)
328 "median.sys": *, (glob)
330 "median.sys": *, (glob)
329 "median.user": *, (glob)
331 "median.user": *, (glob)
330 "median.wall": *, (glob)
332 "median.wall": *, (glob)
331 "sys": *, (glob)
333 "sys": *, (glob)
332 "user": *, (glob)
334 "user": *, (glob)
333 "wall": * (glob)
335 "wall": * (glob)
334 }
336 }
335 ]
337 ]
336
338
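Since the JSON template emits plain machine-readable records, the timings above can be post-processed directly; for example (an illustrative snippet, reusing the exact command and field names exercised in this test):

    import json
    import subprocess

    out = subprocess.run(
        ['hg', 'perfheads', '--template', 'json', '--config', 'perf.stub=no'],
        capture_output=True, text=True, check=True).stdout
    for entry in json.loads(out):
        print('wall=%s count=%s' % (entry['wall'], entry['count']))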
337 Test pre-run feature
339 Test pre-run feature
338 --------------------
340 --------------------
339
341
340 (perf discovery has some spurious output)
342 (perf discovery has some spurious output)
341
343
342 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
344 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
343 ! wall * comb * user * sys * (best of 1) (glob)
345 ! wall * comb * user * sys * (best of 1) (glob)
344 searching for changes
346 searching for changes
345 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
346 ! wall * comb * user * sys * (best of 1) (glob)
348 ! wall * comb * user * sys * (best of 1) (glob)
347 searching for changes
349 searching for changes
348 searching for changes
350 searching for changes
349 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
351 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
350 ! wall * comb * user * sys * (best of 1) (glob)
352 ! wall * comb * user * sys * (best of 1) (glob)
351 searching for changes
353 searching for changes
352 searching for changes
354 searching for changes
353 searching for changes
355 searching for changes
354 searching for changes
356 searching for changes
355
357
356 test profile-benchmark option
358 test profile-benchmark option
357 ------------------------------
359 ------------------------------
358
360
359 Function to check that statprof ran
361 Function to check that statprof ran
360 $ statprofran () {
362 $ statprofran () {
361 > egrep 'Sample count:|No samples recorded' > /dev/null
363 > egrep 'Sample count:|No samples recorded' > /dev/null
362 > }
364 > }
363 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
365 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
364
366
365 Check perf.py for historical portability
367 Check perf.py for historical portability
366 ----------------------------------------
368 ----------------------------------------
367
369
368 $ cd "$TESTDIR/.."
370 $ cd "$TESTDIR/.."
369
371
370 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
372 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
371 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
373 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
372 > "$TESTDIR"/check-perf-code.py contrib/perf.py
374 > "$TESTDIR"/check-perf-code.py contrib/perf.py
373 contrib/perf.py:\d+: (re)
375 contrib/perf.py:\d+: (re)
374 > from mercurial import (
376 > from mercurial import (
375 import newer module separately in try clause for early Mercurial
377 import newer module separately in try clause for early Mercurial
376 contrib/perf.py:\d+: (re)
378 contrib/perf.py:\d+: (re)
377 > from mercurial import (
379 > from mercurial import (
378 import newer module separately in try clause for early Mercurial
380 import newer module separately in try clause for early Mercurial
379 contrib/perf.py:\d+: (re)
381 contrib/perf.py:\d+: (re)
380 > origindexpath = orig.opener.join(orig.indexfile)
382 > origindexpath = orig.opener.join(orig.indexfile)
381 use getvfs()/getsvfs() for early Mercurial
383 use getvfs()/getsvfs() for early Mercurial
382 contrib/perf.py:\d+: (re)
384 contrib/perf.py:\d+: (re)
383 > origdatapath = orig.opener.join(orig.datafile)
385 > origdatapath = orig.opener.join(orig.datafile)
384 use getvfs()/getsvfs() for early Mercurial
386 use getvfs()/getsvfs() for early Mercurial
385 contrib/perf.py:\d+: (re)
387 contrib/perf.py:\d+: (re)
386 > vfs = vfsmod.vfs(tmpdir)
388 > vfs = vfsmod.vfs(tmpdir)
387 use getvfs()/getsvfs() for early Mercurial
389 use getvfs()/getsvfs() for early Mercurial
388 contrib/perf.py:\d+: (re)
390 contrib/perf.py:\d+: (re)
389 > vfs.options = getattr(orig.opener, 'options', None)
391 > vfs.options = getattr(orig.opener, 'options', None)
390 use getvfs()/getsvfs() for early Mercurial
392 use getvfs()/getsvfs() for early Mercurial
391 [1]
393 [1]