perf: add a `perfhelper-mergecopies` command...
marmoute - r42577:21c436a3 default
@@ -1,2944 +1,3049 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`
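
  For example, with these defaults a benchmark stops once it has run for
  3 seconds and completed 100 iterations, or once it has run for 10 seconds
  and completed at least 3 iterations.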

``stub``
  When set, benchmarks will only be run once, which is useful for testing
  (default: off)
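
Example configuration (illustrative values)::

  [perf]
  all-timing = yes
  presleep = 0
  pre-run = 1
  run-limits = 5.0-50, 30.0-5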
'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions as
#   possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

class noop(object):
    """dummy context manager"""
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass

NOOPCTX = noop()

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))


# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking removal of an attribute, which would
    silently break the assumptions of the performance measurement later on.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
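
    Example (mirroring how gettimer() temporarily redirects ui.fout)::

        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            uifout.set(ui.ferr)    # point the attribute at a new value
        # uifout.restore() puts the original attribute back afterwards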
496 """
496 """
497 if not util.safehasattr(obj, name):
497 if not util.safehasattr(obj, name):
498 if ignoremissing:
498 if ignoremissing:
499 return None
499 return None
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
501 b" of performance measurement") % (name, obj))
501 b" of performance measurement") % (name, obj))
502
502
503 origvalue = getattr(obj, _sysstr(name))
503 origvalue = getattr(obj, _sysstr(name))
504 class attrutil(object):
504 class attrutil(object):
505 def set(self, newvalue):
505 def set(self, newvalue):
506 setattr(obj, _sysstr(name), newvalue)
506 setattr(obj, _sysstr(name), newvalue)
507 def restore(self):
507 def restore(self):
508 setattr(obj, _sysstr(name), origvalue)
508 setattr(obj, _sysstr(name), origvalue)
509
509
510 return attrutil()
510 return attrutil()
511
511
512 # utilities to examine each internal API changes
512 # utilities to examine each internal API changes
513
513
514 def getbranchmapsubsettable():
514 def getbranchmapsubsettable():
515 # for "historical portability":
515 # for "historical portability":
516 # subsettable is defined in:
516 # subsettable is defined in:
517 # - branchmap since 2.9 (or 175c6fd8cacc)
517 # - branchmap since 2.9 (or 175c6fd8cacc)
518 # - repoview since 2.5 (or 59a9f18d4587)
518 # - repoview since 2.5 (or 59a9f18d4587)
519 # - repoviewutil since 5.0
519 # - repoviewutil since 5.0
520 for mod in (branchmap, repoview, repoviewutil):
520 for mod in (branchmap, repoview, repoviewutil):
521 subsettable = getattr(mod, 'subsettable', None)
521 subsettable = getattr(mod, 'subsettable', None)
522 if subsettable:
522 if subsettable:
523 return subsettable
523 return subsettable
524
524
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
526 # branchmap and repoview modules exist, but subsettable attribute
526 # branchmap and repoview modules exist, but subsettable attribute
527 # doesn't)
527 # doesn't)
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
529 hint=b"use 2.5 or later")
529 hint=b"use 2.5 or later")
530
530
531 def getsvfs(repo):
531 def getsvfs(repo):
532 """Return appropriate object to access files under .hg/store
532 """Return appropriate object to access files under .hg/store
533 """
533 """
534 # for "historical portability":
534 # for "historical portability":
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
536 svfs = getattr(repo, 'svfs', None)
536 svfs = getattr(repo, 'svfs', None)
537 if svfs:
537 if svfs:
538 return svfs
538 return svfs
539 else:
539 else:
540 return getattr(repo, 'sopener')
540 return getattr(repo, 'sopener')
541
541
542 def getvfs(repo):
542 def getvfs(repo):
543 """Return appropriate object to access files under .hg
543 """Return appropriate object to access files under .hg
544 """
544 """
545 # for "historical portability":
545 # for "historical portability":
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
547 vfs = getattr(repo, 'vfs', None)
547 vfs = getattr(repo, 'vfs', None)
548 if vfs:
548 if vfs:
549 return vfs
549 return vfs
550 else:
550 else:
551 return getattr(repo, 'opener')
551 return getattr(repo, 'opener')
552
552
553 def repocleartagscachefunc(repo):
553 def repocleartagscachefunc(repo):
554 """Return the function to clear tags cache according to repo internal API
554 """Return the function to clear tags cache according to repo internal API
555 """
555 """
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
558 # correct way to clear tags cache, because existing code paths
558 # correct way to clear tags cache, because existing code paths
559 # expect _tagscache to be a structured object.
559 # expect _tagscache to be a structured object.
560 def clearcache():
560 def clearcache():
561 # _tagscache has been filteredpropertycache since 2.5 (or
561 # _tagscache has been filteredpropertycache since 2.5 (or
562 # 98c867ac1330), and delattr() can't work in such case
562 # 98c867ac1330), and delattr() can't work in such case
563 if b'_tagscache' in vars(repo):
563 if b'_tagscache' in vars(repo):
564 del repo.__dict__[b'_tagscache']
564 del repo.__dict__[b'_tagscache']
565 return clearcache
565 return clearcache
566
566
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
568 if repotags: # since 1.4 (or 5614a628d173)
568 if repotags: # since 1.4 (or 5614a628d173)
569 return lambda : repotags.set(None)
569 return lambda : repotags.set(None)
570
570
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
573 return lambda : repotagscache.set(None)
573 return lambda : repotagscache.set(None)
574
574
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
576 # this point, but it isn't so problematic, because:
576 # this point, but it isn't so problematic, because:
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
578 # in perftags() causes failure soon
578 # in perftags() causes failure soon
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
580 raise error.Abort((b"tags API of this hg command is unknown"))
580 raise error.Abort((b"tags API of this hg command is unknown"))
581
581
582 # utilities to clear cache
582 # utilities to clear cache
583
583
584 def clearfilecache(obj, attrname):
584 def clearfilecache(obj, attrname):
585 unfiltered = getattr(obj, 'unfiltered', None)
585 unfiltered = getattr(obj, 'unfiltered', None)
586 if unfiltered is not None:
586 if unfiltered is not None:
587 obj = obj.unfiltered()
587 obj = obj.unfiltered()
588 if attrname in vars(obj):
588 if attrname in vars(obj):
589 delattr(obj, attrname)
589 delattr(obj, attrname)
590 obj._filecache.pop(attrname, None)
590 obj._filecache.pop(attrname, None)
591
591
592 def clearchangelog(repo):
592 def clearchangelog(repo):
593 if repo is not repo.unfiltered():
593 if repo is not repo.unfiltered():
594 object.__setattr__(repo, r'_clcachekey', None)
594 object.__setattr__(repo, r'_clcachekey', None)
595 object.__setattr__(repo, r'_clcache', None)
595 object.__setattr__(repo, r'_clcache', None)
596 clearfilecache(repo.unfiltered(), 'changelog')
596 clearfilecache(repo.unfiltered(), 'changelog')
597
597
598 # perf commands
598 # perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()

@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

966 def _getmergerevs(repo, opts):
966 def _getmergerevs(repo, opts):
967 """parse command arguments to return the revisions involved in a merge
967 """parse command arguments to return the revisions involved in a merge
968
968
969 input: options dictionary with `rev`, `from` and `base`
969 input: options dictionary with `rev`, `from` and `base`
970 output: (localctx, otherctx, basectx)
970 output: (localctx, otherctx, basectx)
971 """
971 """
972 if opts['from']:
972 if opts['from']:
973 fromrev = scmutil.revsingle(repo, opts['from'])
973 fromrev = scmutil.revsingle(repo, opts['from'])
974 wctx = repo[fromrev]
974 wctx = repo[fromrev]
975 else:
975 else:
976 wctx = repo[None]
976 wctx = repo[None]
977 # we don't want working dir files to be stat'd in the benchmark, so
977 # we don't want working dir files to be stat'd in the benchmark, so
978 # prime that cache
978 # prime that cache
979 wctx.dirty()
979 wctx.dirty()
980 rctx = scmutil.revsingle(repo, opts['rev'], opts['rev'])
980 rctx = scmutil.revsingle(repo, opts['rev'], opts['rev'])
981 if opts['base']:
981 if opts['base']:
982 fromrev = scmutil.revsingle(repo, opts['base'])
982 fromrev = scmutil.revsingle(repo, opts['base'])
983 ancestor = repo[fromrev]
983 ancestor = repo[fromrev]
984 else:
984 else:
985 ancestor = wctx.ancestor(rctx)
985 ancestor = wctx.ancestor(rctx)
986 return (wctx, rctx, ancestor)
986 return (wctx, rctx, ancestor)
987
987
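# Illustration of how the (wctx, rctx, ancestor) triplet returned by
# _getmergerevs() feeds the merge benchmarks below; the option values are
# placeholders and `repo` is assumed to be an already-open repository:
#
#   opts = {'rev': b'.', 'from': b'', 'base': b''}
#   wctx, rctx, ancestor = _getmergerevs(repo, opts)
#   # wctx is the "local" side, rctx the "other" side, ancestor the base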
988 @command(b'perfmergecalculate',
988 @command(b'perfmergecalculate',
989 [
989 [
990 (b'r', b'rev', b'.', b'rev to merge against'),
990 (b'r', b'rev', b'.', b'rev to merge against'),
991 (b'', b'from', b'', b'rev to merge from'),
991 (b'', b'from', b'', b'rev to merge from'),
992 (b'', b'base', b'', b'the revision to use as base'),
992 (b'', b'base', b'', b'the revision to use as base'),
993 ] + formatteropts)
993 ] + formatteropts)
994 def perfmergecalculate(ui, repo, **opts):
994 def perfmergecalculate(ui, repo, **opts):
995 opts = _byteskwargs(opts)
995 opts = _byteskwargs(opts)
996 timer, fm = gettimer(ui, opts)
996 timer, fm = gettimer(ui, opts)
997
997
998 wctx, rctx, ancestor = _getmergerevs(repo, opts)
998 wctx, rctx, ancestor = _getmergerevs(repo, opts)
999 def d():
999 def d():
1000 # acceptremote is True because we don't want prompts in the middle of
1000 # acceptremote is True because we don't want prompts in the middle of
1001 # our benchmark
1001 # our benchmark
1002 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1002 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1003 acceptremote=True, followcopies=True)
1003 acceptremote=True, followcopies=True)
1004 timer(d)
1004 timer(d)
1005 fm.end()
1005 fm.end()
1006
1006
1007 @command(b'perfmergecopies',
1007 @command(b'perfmergecopies',
1008 [
1008 [
1009 (b'r', b'rev', b'.', b'rev to merge against'),
1009 (b'r', b'rev', b'.', b'rev to merge against'),
1010 (b'', b'from', b'', b'rev to merge from'),
1010 (b'', b'from', b'', b'rev to merge from'),
1011 (b'', b'base', b'', b'the revision to use as base'),
1011 (b'', b'base', b'', b'the revision to use as base'),
1012 ] + formatteropts)
1012 ] + formatteropts)
1013 def perfmergecopies(ui, repo, **opts):
1013 def perfmergecopies(ui, repo, **opts):
1014 """measure runtime of `copies.mergecopies`"""
1014 """measure runtime of `copies.mergecopies`"""
1015 opts = _byteskwargs(opts)
1015 opts = _byteskwargs(opts)
1016 timer, fm = gettimer(ui, opts)
1016 timer, fm = gettimer(ui, opts)
1017 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1017 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1018 def d():
1018 def d():
1019 # acceptremote is True because we don't want prompts in the middle of
1019 # acceptremote is True because we don't want prompts in the middle of
1020 # our benchmark
1020 # our benchmark
1021 copies.mergecopies(repo, wctx, rctx, ancestor)
1021 copies.mergecopies(repo, wctx, rctx, ancestor)
1022 timer(d)
1022 timer(d)
1023 fm.end()
1023 fm.end()
1024
1024
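# Example invocations (revision arguments are placeholders); both
# `perfmergecalculate` and `perfmergecopies` use the --rev/--from/--base
# options parsed by _getmergerevs() above:
#
#   $ hg perfmergecopies --rev REV
#   $ hg perfmergecopies --from P1 --rev P2 --base BASE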
1025 @command(b'perfpathcopies', [], b"REV REV")
1025 @command(b'perfpathcopies', [], b"REV REV")
1026 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1026 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1027 """benchmark the copy tracing logic"""
1027 """benchmark the copy tracing logic"""
1028 opts = _byteskwargs(opts)
1028 opts = _byteskwargs(opts)
1029 timer, fm = gettimer(ui, opts)
1029 timer, fm = gettimer(ui, opts)
1030 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1030 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1031 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1031 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1032 def d():
1032 def d():
1033 copies.pathcopies(ctx1, ctx2)
1033 copies.pathcopies(ctx1, ctx2)
1034 timer(d)
1034 timer(d)
1035 fm.end()
1035 fm.end()
1036
1036
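# Example invocation (revisions are placeholders); candidate pairs can be
# found with the `perfhelper-pathcopies` command further below:
#
#   $ hg perfpathcopies REV1 REV2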
1037 @command(b'perfphases',
1037 @command(b'perfphases',
1038 [(b'', b'full', False, b'include file reading time too'),
1038 [(b'', b'full', False, b'include file reading time too'),
1039 ], b"")
1039 ], b"")
1040 def perfphases(ui, repo, **opts):
1040 def perfphases(ui, repo, **opts):
1041 """benchmark phasesets computation"""
1041 """benchmark phasesets computation"""
1042 opts = _byteskwargs(opts)
1042 opts = _byteskwargs(opts)
1043 timer, fm = gettimer(ui, opts)
1043 timer, fm = gettimer(ui, opts)
1044 _phases = repo._phasecache
1044 _phases = repo._phasecache
1045 full = opts.get(b'full')
1045 full = opts.get(b'full')
1046 def d():
1046 def d():
1047 phases = _phases
1047 phases = _phases
1048 if full:
1048 if full:
1049 clearfilecache(repo, b'_phasecache')
1049 clearfilecache(repo, b'_phasecache')
1050 phases = repo._phasecache
1050 phases = repo._phasecache
1051 phases.invalidate()
1051 phases.invalidate()
1052 phases.loadphaserevs(repo)
1052 phases.loadphaserevs(repo)
1053 timer(d)
1053 timer(d)
1054 fm.end()
1054 fm.end()
1055
1055
1056 @command(b'perfphasesremote',
1056 @command(b'perfphasesremote',
1057 [], b"[DEST]")
1057 [], b"[DEST]")
1058 def perfphasesremote(ui, repo, dest=None, **opts):
1058 def perfphasesremote(ui, repo, dest=None, **opts):
1059 """benchmark time needed to analyse phases of the remote server"""
1059 """benchmark time needed to analyse phases of the remote server"""
1060 from mercurial.node import (
1060 from mercurial.node import (
1061 bin,
1061 bin,
1062 )
1062 )
1063 from mercurial import (
1063 from mercurial import (
1064 exchange,
1064 exchange,
1065 hg,
1065 hg,
1066 phases,
1066 phases,
1067 )
1067 )
1068 opts = _byteskwargs(opts)
1068 opts = _byteskwargs(opts)
1069 timer, fm = gettimer(ui, opts)
1069 timer, fm = gettimer(ui, opts)
1070
1070
1071 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1071 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1072 if not path:
1072 if not path:
1073 raise error.Abort((b'default repository not configured!'),
1073 raise error.Abort((b'default repository not configured!'),
1074 hint=(b"see 'hg help config.paths'"))
1074 hint=(b"see 'hg help config.paths'"))
1075 dest = path.pushloc or path.loc
1075 dest = path.pushloc or path.loc
1076 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
1076 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
1077 other = hg.peer(repo, opts, dest)
1077 other = hg.peer(repo, opts, dest)
1078
1078
1079 # easier to perform discovery through the operation
1079 # easier to perform discovery through the operation
1080 op = exchange.pushoperation(repo, other)
1080 op = exchange.pushoperation(repo, other)
1081 exchange._pushdiscoverychangeset(op)
1081 exchange._pushdiscoverychangeset(op)
1082
1082
1083 remotesubset = op.fallbackheads
1083 remotesubset = op.fallbackheads
1084
1084
1085 with other.commandexecutor() as e:
1085 with other.commandexecutor() as e:
1086 remotephases = e.callcommand(b'listkeys',
1086 remotephases = e.callcommand(b'listkeys',
1087 {b'namespace': b'phases'}).result()
1087 {b'namespace': b'phases'}).result()
1088 del other
1088 del other
1089 publishing = remotephases.get(b'publishing', False)
1089 publishing = remotephases.get(b'publishing', False)
1090 if publishing:
1090 if publishing:
1091 ui.status((b'publishing: yes\n'))
1091 ui.status((b'publishing: yes\n'))
1092 else:
1092 else:
1093 ui.status((b'publishing: no\n'))
1093 ui.status((b'publishing: no\n'))
1094
1094
1095 nodemap = repo.changelog.nodemap
1095 nodemap = repo.changelog.nodemap
1096 nonpublishroots = 0
1096 nonpublishroots = 0
1097 for nhex, phase in remotephases.iteritems():
1097 for nhex, phase in remotephases.iteritems():
1098 if nhex == b'publishing': # ignore data related to publish option
1098 if nhex == b'publishing': # ignore data related to publish option
1099 continue
1099 continue
1100 node = bin(nhex)
1100 node = bin(nhex)
1101 if node in nodemap and int(phase):
1101 if node in nodemap and int(phase):
1102 nonpublishroots += 1
1102 nonpublishroots += 1
1103 ui.status((b'number of roots: %d\n') % len(remotephases))
1103 ui.status((b'number of roots: %d\n') % len(remotephases))
1104 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1104 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1105 def d():
1105 def d():
1106 phases.remotephasessummary(repo,
1106 phases.remotephasessummary(repo,
1107 remotesubset,
1107 remotesubset,
1108 remotephases)
1108 remotephases)
1109 timer(d)
1109 timer(d)
1110 fm.end()
1110 fm.end()
1111
1111
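# Example invocations; the URL is illustrative. Without an argument, the
# `default-push` or `default` path from the configuration is analysed:
#
#   $ hg perfphasesremote
#   $ hg perfphasesremote https://example.com/some-repo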
1112 @command(b'perfmanifest',[
1112 @command(b'perfmanifest',[
1113 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1113 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1114 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1114 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1115 ] + formatteropts, b'REV|NODE')
1115 ] + formatteropts, b'REV|NODE')
1116 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1116 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1117 """benchmark the time to read a manifest from disk and return a usable
1117 """benchmark the time to read a manifest from disk and return a usable
1118 dict-like object
1118 dict-like object
1119
1119
1120 Manifest caches are cleared before retrieval."""
1120 Manifest caches are cleared before retrieval."""
1121 opts = _byteskwargs(opts)
1121 opts = _byteskwargs(opts)
1122 timer, fm = gettimer(ui, opts)
1122 timer, fm = gettimer(ui, opts)
1123 if not manifest_rev:
1123 if not manifest_rev:
1124 ctx = scmutil.revsingle(repo, rev, rev)
1124 ctx = scmutil.revsingle(repo, rev, rev)
1125 t = ctx.manifestnode()
1125 t = ctx.manifestnode()
1126 else:
1126 else:
1127 from mercurial.node import bin
1127 from mercurial.node import bin
1128
1128
1129 if len(rev) == 40:
1129 if len(rev) == 40:
1130 t = bin(rev)
1130 t = bin(rev)
1131 else:
1131 else:
1132 try:
1132 try:
1133 rev = int(rev)
1133 rev = int(rev)
1134
1134
1135 if util.safehasattr(repo.manifestlog, b'getstorage'):
1135 if util.safehasattr(repo.manifestlog, b'getstorage'):
1136 t = repo.manifestlog.getstorage(b'').node(rev)
1136 t = repo.manifestlog.getstorage(b'').node(rev)
1137 else:
1137 else:
1138 t = repo.manifestlog._revlog.lookup(rev)
1138 t = repo.manifestlog._revlog.lookup(rev)
1139 except ValueError:
1139 except ValueError:
1140 raise error.Abort(b'manifest revision must be integer or full '
1140 raise error.Abort(b'manifest revision must be integer or full '
1141 b'node')
1141 b'node')
1142 def d():
1142 def d():
1143 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1143 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1144 repo.manifestlog[t].read()
1144 repo.manifestlog[t].read()
1145 timer(d)
1145 timer(d)
1146 fm.end()
1146 fm.end()
1147
1147
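# Example invocations (revision values are placeholders):
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest tip --clear-disk
#   $ hg perfmanifest --manifest-rev 0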
1148 @command(b'perfchangeset', formatteropts)
1148 @command(b'perfchangeset', formatteropts)
1149 def perfchangeset(ui, repo, rev, **opts):
1149 def perfchangeset(ui, repo, rev, **opts):
1150 opts = _byteskwargs(opts)
1150 opts = _byteskwargs(opts)
1151 timer, fm = gettimer(ui, opts)
1151 timer, fm = gettimer(ui, opts)
1152 n = scmutil.revsingle(repo, rev).node()
1152 n = scmutil.revsingle(repo, rev).node()
1153 def d():
1153 def d():
1154 repo.changelog.read(n)
1154 repo.changelog.read(n)
1155 #repo.changelog._cache = None
1155 #repo.changelog._cache = None
1156 timer(d)
1156 timer(d)
1157 fm.end()
1157 fm.end()
1158
1158
1159 @command(b'perfignore', formatteropts)
1159 @command(b'perfignore', formatteropts)
1160 def perfignore(ui, repo, **opts):
1160 def perfignore(ui, repo, **opts):
1161 """benchmark operation related to computing ignore"""
1161 """benchmark operation related to computing ignore"""
1162 opts = _byteskwargs(opts)
1162 opts = _byteskwargs(opts)
1163 timer, fm = gettimer(ui, opts)
1163 timer, fm = gettimer(ui, opts)
1164 dirstate = repo.dirstate
1164 dirstate = repo.dirstate
1165
1165
1166 def setupone():
1166 def setupone():
1167 dirstate.invalidate()
1167 dirstate.invalidate()
1168 clearfilecache(dirstate, b'_ignore')
1168 clearfilecache(dirstate, b'_ignore')
1169
1169
1170 def runone():
1170 def runone():
1171 dirstate._ignore
1171 dirstate._ignore
1172
1172
1173 timer(runone, setup=setupone, title=b"load")
1173 timer(runone, setup=setupone, title=b"load")
1174 fm.end()
1174 fm.end()
1175
1175
1176 @command(b'perfindex', [
1176 @command(b'perfindex', [
1177 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1177 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1178 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1178 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1179 ] + formatteropts)
1179 ] + formatteropts)
1180 def perfindex(ui, repo, **opts):
1180 def perfindex(ui, repo, **opts):
1181 """benchmark index creation time followed by a lookup
1181 """benchmark index creation time followed by a lookup
1182
1182
1183 The default is to look `tip` up. Depending on the index implementation,
1183 The default is to look `tip` up. Depending on the index implementation,
1184 the revision looked up can matter. For example, an implementation
1184 the revision looked up can matter. For example, an implementation
1185 scanning the index will have a faster lookup time for `--rev tip` than for
1185 scanning the index will have a faster lookup time for `--rev tip` than for
1186 `--rev 0`. The number of looked up revisions and their order can also
1186 `--rev 0`. The number of looked up revisions and their order can also
1187 matter.
1187 matter.
1188
1188
1189 Examples of useful sets to test:
1189 Examples of useful sets to test:
1190 * tip
1190 * tip
1191 * 0
1191 * 0
1192 * -10:
1192 * -10:
1193 * :10
1193 * :10
1194 * -10: + :10
1194 * -10: + :10
1195 * :10: + -10:
1195 * :10: + -10:
1196 * -10000:
1196 * -10000:
1197 * -10000: + 0
1197 * -10000: + 0
1198
1198
1199 It is not currently possible to check for lookup of a missing node. For
1199 It is not currently possible to check for lookup of a missing node. For
1200 deeper lookup benchmarking, check out the `perfnodemap` command."""
1200 deeper lookup benchmarking, check out the `perfnodemap` command."""
1201 import mercurial.revlog
1201 import mercurial.revlog
1202 opts = _byteskwargs(opts)
1202 opts = _byteskwargs(opts)
1203 timer, fm = gettimer(ui, opts)
1203 timer, fm = gettimer(ui, opts)
1204 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1204 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1205 if opts[b'no_lookup']:
1205 if opts[b'no_lookup']:
1206 if opts['rev']:
1206 if opts['rev']:
1207 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1207 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1208 nodes = []
1208 nodes = []
1209 elif not opts[b'rev']:
1209 elif not opts[b'rev']:
1210 nodes = [repo[b"tip"].node()]
1210 nodes = [repo[b"tip"].node()]
1211 else:
1211 else:
1212 revs = scmutil.revrange(repo, opts[b'rev'])
1212 revs = scmutil.revrange(repo, opts[b'rev'])
1213 cl = repo.changelog
1213 cl = repo.changelog
1214 nodes = [cl.node(r) for r in revs]
1214 nodes = [cl.node(r) for r in revs]
1215
1215
1216 unfi = repo.unfiltered()
1216 unfi = repo.unfiltered()
1217 # find the filecache func directly
1217 # find the filecache func directly
1218 # This avoids polluting the benchmark with the filecache logic
1218 # This avoids polluting the benchmark with the filecache logic
1219 makecl = unfi.__class__.changelog.func
1219 makecl = unfi.__class__.changelog.func
1220 def setup():
1220 def setup():
1221 # probably not necessary, but for good measure
1221 # probably not necessary, but for good measure
1222 clearchangelog(unfi)
1222 clearchangelog(unfi)
1223 def d():
1223 def d():
1224 cl = makecl(unfi)
1224 cl = makecl(unfi)
1225 for n in nodes:
1225 for n in nodes:
1226 cl.rev(n)
1226 cl.rev(n)
1227 timer(d, setup=setup)
1227 timer(d, setup=setup)
1228 fm.end()
1228 fm.end()
1229
1229
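# Example invocations mirroring the revsets suggested in the docstring above;
# quoting is for the shell and the repository must be large enough for the
# ranges to exist:
#
#   $ hg perfindex --rev tip
#   $ hg perfindex --rev '-10000: + 0'
#   $ hg perfindex --no-lookup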
1230 @command(b'perfnodemap', [
1230 @command(b'perfnodemap', [
1231 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1231 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1232 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1232 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1233 ] + formatteropts)
1233 ] + formatteropts)
1234 def perfnodemap(ui, repo, **opts):
1234 def perfnodemap(ui, repo, **opts):
1235 """benchmark the time necessary to look up revisions from a cold nodemap
1235 """benchmark the time necessary to look up revisions from a cold nodemap
1236
1236
1237 Depending on the implementation, the number and order of revisions we look
1237 Depending on the implementation, the number and order of revisions we look
1238 up can vary. Examples of useful sets to test:
1238 up can vary. Examples of useful sets to test:
1239 * tip
1239 * tip
1240 * 0
1240 * 0
1241 * -10:
1241 * -10:
1242 * :10
1242 * :10
1243 * -10: + :10
1243 * -10: + :10
1244 * :10: + -10:
1244 * :10: + -10:
1245 * -10000:
1245 * -10000:
1246 * -10000: + 0
1246 * -10000: + 0
1247
1247
1248 The command currently focuses on valid binary lookup. Benchmarking for
1248 The command currently focuses on valid binary lookup. Benchmarking for
1249 hexlookup, prefix lookup and missing lookup would also be valuable.
1249 hexlookup, prefix lookup and missing lookup would also be valuable.
1250 """
1250 """
1251 import mercurial.revlog
1251 import mercurial.revlog
1252 opts = _byteskwargs(opts)
1252 opts = _byteskwargs(opts)
1253 timer, fm = gettimer(ui, opts)
1253 timer, fm = gettimer(ui, opts)
1254 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1254 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1255
1255
1256 unfi = repo.unfiltered()
1256 unfi = repo.unfiltered()
1257 clearcaches = opts['clear_caches']
1257 clearcaches = opts['clear_caches']
1258 # find the filecache func directly
1258 # find the filecache func directly
1259 # This avoids polluting the benchmark with the filecache logic
1259 # This avoids polluting the benchmark with the filecache logic
1260 makecl = unfi.__class__.changelog.func
1260 makecl = unfi.__class__.changelog.func
1261 if not opts[b'rev']:
1261 if not opts[b'rev']:
1262 raise error.Abort('use --rev to specify revisions to look up')
1262 raise error.Abort('use --rev to specify revisions to look up')
1263 revs = scmutil.revrange(repo, opts[b'rev'])
1263 revs = scmutil.revrange(repo, opts[b'rev'])
1264 cl = repo.changelog
1264 cl = repo.changelog
1265 nodes = [cl.node(r) for r in revs]
1265 nodes = [cl.node(r) for r in revs]
1266
1266
1267 # use a list to pass reference to a nodemap from one closure to the next
1267 # use a list to pass reference to a nodemap from one closure to the next
1268 nodeget = [None]
1268 nodeget = [None]
1269 def setnodeget():
1269 def setnodeget():
1270 # probably not necessary, but for good measure
1270 # probably not necessary, but for good measure
1271 clearchangelog(unfi)
1271 clearchangelog(unfi)
1272 nodeget[0] = makecl(unfi).nodemap.get
1272 nodeget[0] = makecl(unfi).nodemap.get
1273
1273
1274 def d():
1274 def d():
1275 get = nodeget[0]
1275 get = nodeget[0]
1276 for n in nodes:
1276 for n in nodes:
1277 get(n)
1277 get(n)
1278
1278
1279 setup = None
1279 setup = None
1280 if clearcaches:
1280 if clearcaches:
1281 def setup():
1281 def setup():
1282 setnodeget()
1282 setnodeget()
1283 else:
1283 else:
1284 setnodeget()
1284 setnodeget()
1285 d() # prewarm the data structure
1285 d() # prewarm the data structure
1286 timer(d, setup=setup)
1286 timer(d, setup=setup)
1287 fm.end()
1287 fm.end()
1288
1288
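# Example invocations; --rev is mandatory for this command and the revsets
# are illustrative (--rev can be repeated):
#
#   $ hg perfnodemap --rev tip
#   $ hg perfnodemap --rev '-10:' --rev ':10'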
1289 @command(b'perfstartup', formatteropts)
1289 @command(b'perfstartup', formatteropts)
1290 def perfstartup(ui, repo, **opts):
1290 def perfstartup(ui, repo, **opts):
1291 opts = _byteskwargs(opts)
1291 opts = _byteskwargs(opts)
1292 timer, fm = gettimer(ui, opts)
1292 timer, fm = gettimer(ui, opts)
1293 def d():
1293 def d():
1294 if os.name != r'nt':
1294 if os.name != r'nt':
1295 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1295 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1296 fsencode(sys.argv[0]))
1296 fsencode(sys.argv[0]))
1297 else:
1297 else:
1298 os.environ[r'HGRCPATH'] = r' '
1298 os.environ[r'HGRCPATH'] = r' '
1299 os.system(r"%s version -q > NUL" % sys.argv[0])
1299 os.system(r"%s version -q > NUL" % sys.argv[0])
1300 timer(d)
1300 timer(d)
1301 fm.end()
1301 fm.end()
1302
1302
1303 @command(b'perfparents', formatteropts)
1303 @command(b'perfparents', formatteropts)
1304 def perfparents(ui, repo, **opts):
1304 def perfparents(ui, repo, **opts):
1305 """benchmark the time necessary to fetch one changeset's parents.
1305 """benchmark the time necessary to fetch one changeset's parents.
1306
1306
1307 The fetch is done using the `node identifier`, traversing all object layers
1307 The fetch is done using the `node identifier`, traversing all object layers
1308 from the repository object. The first N revisions will be used for this
1308 from the repository object. The first N revisions will be used for this
1309 benchmark. N is controlled by the ``perf.parentscount`` config option
1309 benchmark. N is controlled by the ``perf.parentscount`` config option
1310 (default: 1000).
1310 (default: 1000).
1311 """
1311 """
1312 opts = _byteskwargs(opts)
1312 opts = _byteskwargs(opts)
1313 timer, fm = gettimer(ui, opts)
1313 timer, fm = gettimer(ui, opts)
1314 # control the number of commits perfparents iterates over
1314 # control the number of commits perfparents iterates over
1315 # experimental config: perf.parentscount
1315 # experimental config: perf.parentscount
1316 count = getint(ui, b"perf", b"parentscount", 1000)
1316 count = getint(ui, b"perf", b"parentscount", 1000)
1317 if len(repo.changelog) < count:
1317 if len(repo.changelog) < count:
1318 raise error.Abort(b"repo needs %d commits for this test" % count)
1318 raise error.Abort(b"repo needs %d commits for this test" % count)
1319 repo = repo.unfiltered()
1319 repo = repo.unfiltered()
1320 nl = [repo.changelog.node(i) for i in _xrange(count)]
1320 nl = [repo.changelog.node(i) for i in _xrange(count)]
1321 def d():
1321 def d():
1322 for n in nl:
1322 for n in nl:
1323 repo.changelog.parents(n)
1323 repo.changelog.parents(n)
1324 timer(d)
1324 timer(d)
1325 fm.end()
1325 fm.end()
1326
1326
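# Example: exercising more revisions through the experimental knob mentioned
# in the docstring; the value is illustrative and the repository must contain
# at least that many commits:
#
#   $ hg perfparents --config perf.parentscount=5000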
1327 @command(b'perfctxfiles', formatteropts)
1327 @command(b'perfctxfiles', formatteropts)
1328 def perfctxfiles(ui, repo, x, **opts):
1328 def perfctxfiles(ui, repo, x, **opts):
1329 opts = _byteskwargs(opts)
1329 opts = _byteskwargs(opts)
1330 x = int(x)
1330 x = int(x)
1331 timer, fm = gettimer(ui, opts)
1331 timer, fm = gettimer(ui, opts)
1332 def d():
1332 def d():
1333 len(repo[x].files())
1333 len(repo[x].files())
1334 timer(d)
1334 timer(d)
1335 fm.end()
1335 fm.end()
1336
1336
1337 @command(b'perfrawfiles', formatteropts)
1337 @command(b'perfrawfiles', formatteropts)
1338 def perfrawfiles(ui, repo, x, **opts):
1338 def perfrawfiles(ui, repo, x, **opts):
1339 opts = _byteskwargs(opts)
1339 opts = _byteskwargs(opts)
1340 x = int(x)
1340 x = int(x)
1341 timer, fm = gettimer(ui, opts)
1341 timer, fm = gettimer(ui, opts)
1342 cl = repo.changelog
1342 cl = repo.changelog
1343 def d():
1343 def d():
1344 len(cl.read(x)[3])
1344 len(cl.read(x)[3])
1345 timer(d)
1345 timer(d)
1346 fm.end()
1346 fm.end()
1347
1347
1348 @command(b'perflookup', formatteropts)
1348 @command(b'perflookup', formatteropts)
1349 def perflookup(ui, repo, rev, **opts):
1349 def perflookup(ui, repo, rev, **opts):
1350 opts = _byteskwargs(opts)
1350 opts = _byteskwargs(opts)
1351 timer, fm = gettimer(ui, opts)
1351 timer, fm = gettimer(ui, opts)
1352 timer(lambda: len(repo.lookup(rev)))
1352 timer(lambda: len(repo.lookup(rev)))
1353 fm.end()
1353 fm.end()
1354
1354
1355 @command(b'perflinelogedits',
1355 @command(b'perflinelogedits',
1356 [(b'n', b'edits', 10000, b'number of edits'),
1356 [(b'n', b'edits', 10000, b'number of edits'),
1357 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1357 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1358 ], norepo=True)
1358 ], norepo=True)
1359 def perflinelogedits(ui, **opts):
1359 def perflinelogedits(ui, **opts):
1360 from mercurial import linelog
1360 from mercurial import linelog
1361
1361
1362 opts = _byteskwargs(opts)
1362 opts = _byteskwargs(opts)
1363
1363
1364 edits = opts[b'edits']
1364 edits = opts[b'edits']
1365 maxhunklines = opts[b'max_hunk_lines']
1365 maxhunklines = opts[b'max_hunk_lines']
1366
1366
1367 maxb1 = 100000
1367 maxb1 = 100000
1368 random.seed(0)
1368 random.seed(0)
1369 randint = random.randint
1369 randint = random.randint
1370 currentlines = 0
1370 currentlines = 0
1371 arglist = []
1371 arglist = []
1372 for rev in _xrange(edits):
1372 for rev in _xrange(edits):
1373 a1 = randint(0, currentlines)
1373 a1 = randint(0, currentlines)
1374 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1374 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1375 b1 = randint(0, maxb1)
1375 b1 = randint(0, maxb1)
1376 b2 = randint(b1, b1 + maxhunklines)
1376 b2 = randint(b1, b1 + maxhunklines)
1377 currentlines += (b2 - b1) - (a2 - a1)
1377 currentlines += (b2 - b1) - (a2 - a1)
1378 arglist.append((rev, a1, a2, b1, b2))
1378 arglist.append((rev, a1, a2, b1, b2))
1379
1379
1380 def d():
1380 def d():
1381 ll = linelog.linelog()
1381 ll = linelog.linelog()
1382 for args in arglist:
1382 for args in arglist:
1383 ll.replacelines(*args)
1383 ll.replacelines(*args)
1384
1384
1385 timer, fm = gettimer(ui, opts)
1385 timer, fm = gettimer(ui, opts)
1386 timer(d)
1386 timer(d)
1387 fm.end()
1387 fm.end()
1388
1388
1389 @command(b'perfrevrange', formatteropts)
1389 @command(b'perfrevrange', formatteropts)
1390 def perfrevrange(ui, repo, *specs, **opts):
1390 def perfrevrange(ui, repo, *specs, **opts):
1391 opts = _byteskwargs(opts)
1391 opts = _byteskwargs(opts)
1392 timer, fm = gettimer(ui, opts)
1392 timer, fm = gettimer(ui, opts)
1393 revrange = scmutil.revrange
1393 revrange = scmutil.revrange
1394 timer(lambda: len(revrange(repo, specs)))
1394 timer(lambda: len(revrange(repo, specs)))
1395 fm.end()
1395 fm.end()
1396
1396
1397 @command(b'perfnodelookup', formatteropts)
1397 @command(b'perfnodelookup', formatteropts)
1398 def perfnodelookup(ui, repo, rev, **opts):
1398 def perfnodelookup(ui, repo, rev, **opts):
1399 opts = _byteskwargs(opts)
1399 opts = _byteskwargs(opts)
1400 timer, fm = gettimer(ui, opts)
1400 timer, fm = gettimer(ui, opts)
1401 import mercurial.revlog
1401 import mercurial.revlog
1402 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1402 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1403 n = scmutil.revsingle(repo, rev).node()
1403 n = scmutil.revsingle(repo, rev).node()
1404 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1404 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1405 def d():
1405 def d():
1406 cl.rev(n)
1406 cl.rev(n)
1407 clearcaches(cl)
1407 clearcaches(cl)
1408 timer(d)
1408 timer(d)
1409 fm.end()
1409 fm.end()
1410
1410
1411 @command(b'perflog',
1411 @command(b'perflog',
1412 [(b'', b'rename', False, b'ask log to follow renames')
1412 [(b'', b'rename', False, b'ask log to follow renames')
1413 ] + formatteropts)
1413 ] + formatteropts)
1414 def perflog(ui, repo, rev=None, **opts):
1414 def perflog(ui, repo, rev=None, **opts):
1415 opts = _byteskwargs(opts)
1415 opts = _byteskwargs(opts)
1416 if rev is None:
1416 if rev is None:
1417 rev=[]
1417 rev=[]
1418 timer, fm = gettimer(ui, opts)
1418 timer, fm = gettimer(ui, opts)
1419 ui.pushbuffer()
1419 ui.pushbuffer()
1420 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1420 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1421 copies=opts.get(b'rename')))
1421 copies=opts.get(b'rename')))
1422 ui.popbuffer()
1422 ui.popbuffer()
1423 fm.end()
1423 fm.end()
1424
1424
1425 @command(b'perfmoonwalk', formatteropts)
1425 @command(b'perfmoonwalk', formatteropts)
1426 def perfmoonwalk(ui, repo, **opts):
1426 def perfmoonwalk(ui, repo, **opts):
1427 """benchmark walking the changelog backwards
1427 """benchmark walking the changelog backwards
1428
1428
1429 This also loads the changelog data for each revision in the changelog.
1429 This also loads the changelog data for each revision in the changelog.
1430 """
1430 """
1431 opts = _byteskwargs(opts)
1431 opts = _byteskwargs(opts)
1432 timer, fm = gettimer(ui, opts)
1432 timer, fm = gettimer(ui, opts)
1433 def moonwalk():
1433 def moonwalk():
1434 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1434 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1435 ctx = repo[i]
1435 ctx = repo[i]
1436 ctx.branch() # read changelog data (in addition to the index)
1436 ctx.branch() # read changelog data (in addition to the index)
1437 timer(moonwalk)
1437 timer(moonwalk)
1438 fm.end()
1438 fm.end()
1439
1439
1440 @command(b'perftemplating',
1440 @command(b'perftemplating',
1441 [(b'r', b'rev', [], b'revisions to run the template on'),
1441 [(b'r', b'rev', [], b'revisions to run the template on'),
1442 ] + formatteropts)
1442 ] + formatteropts)
1443 def perftemplating(ui, repo, testedtemplate=None, **opts):
1443 def perftemplating(ui, repo, testedtemplate=None, **opts):
1444 """test the rendering time of a given template"""
1444 """test the rendering time of a given template"""
1445 if makelogtemplater is None:
1445 if makelogtemplater is None:
1446 raise error.Abort((b"perftemplating not available with this Mercurial"),
1446 raise error.Abort((b"perftemplating not available with this Mercurial"),
1447 hint=b"use 4.3 or later")
1447 hint=b"use 4.3 or later")
1448
1448
1449 opts = _byteskwargs(opts)
1449 opts = _byteskwargs(opts)
1450
1450
1451 nullui = ui.copy()
1451 nullui = ui.copy()
1452 nullui.fout = open(os.devnull, r'wb')
1452 nullui.fout = open(os.devnull, r'wb')
1453 nullui.disablepager()
1453 nullui.disablepager()
1454 revs = opts.get(b'rev')
1454 revs = opts.get(b'rev')
1455 if not revs:
1455 if not revs:
1456 revs = [b'all()']
1456 revs = [b'all()']
1457 revs = list(scmutil.revrange(repo, revs))
1457 revs = list(scmutil.revrange(repo, revs))
1458
1458
1459 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1459 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1460 b' {author|person}: {desc|firstline}\n')
1460 b' {author|person}: {desc|firstline}\n')
1461 if testedtemplate is None:
1461 if testedtemplate is None:
1462 testedtemplate = defaulttemplate
1462 testedtemplate = defaulttemplate
1463 displayer = makelogtemplater(nullui, repo, testedtemplate)
1463 displayer = makelogtemplater(nullui, repo, testedtemplate)
1464 def format():
1464 def format():
1465 for r in revs:
1465 for r in revs:
1466 ctx = repo[r]
1466 ctx = repo[r]
1467 displayer.show(ctx)
1467 displayer.show(ctx)
1468 displayer.flush(ctx)
1468 displayer.flush(ctx)
1469
1469
1470 timer, fm = gettimer(ui, opts)
1470 timer, fm = gettimer(ui, opts)
1471 timer(format)
1471 timer(format)
1472 fm.end()
1472 fm.end()
1473
1473
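# Example invocations; the revset and the template passed as positional
# argument are illustrative (the built-in default template is used when no
# template is given):
#
#   $ hg perftemplating -r '-1000:'
#   $ hg perftemplating -r '-1000:' '{rev}:{node|short} {desc|firstline}\n'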
1474 @command(b'perfhelper-mergecopies', formatteropts +
1475 [
1476 (b'r', b'revs', [], b'restrict search to these revisions'),
1477 (b'', b'timing', False, b'provides extra data (costly)'),
1478 ])
1479 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1480 """find statistics about potential parameters for `perfmergecopies`
1481
1482 This command finds (base, p1, p2) triplets relevant for copytracing
1483 benchmarking in the context of a merge. It reports values for some of the
1484 parameters that impact copy tracing time during a merge.
1485
1486 If `--timing` is set, rename detection is run and the associated timing
1487 will be reported. The extra details come at the cost of slower command
1488 execution.
1489
1490 Since rename detection is only run once, other factors might easily
1491 affect the precision of the timing. However it should give a good
1492 approximation of which revision triplets are very costly.
1493 """
1494 opts = _byteskwargs(opts)
1495 fm = ui.formatter(b'perf', opts)
1496 dotiming = opts[b'timing']
1497
1498 output_template = [
1499 ("base", "%(base)12s"),
1500 ("p1", "%(p1.node)12s"),
1501 ("p2", "%(p2.node)12s"),
1502 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1503 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1504 ("p1.renames", "%(p1.renamedfiles)12d"),
1505 ("p1.time", "%(p1.time)12.3f"),
1506 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1507 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1508 ("p2.renames", "%(p2.renamedfiles)12d"),
1509 ("p2.time", "%(p2.time)12.3f"),
1510 ("renames", "%(nbrenamedfiles)12d"),
1511 ("total.time", "%(time)12.3f"),
1512 ]
1513 if not dotiming:
1514 output_template = [i for i in output_template
1515 if not ('time' in i[0] or 'renames' in i[0])]
1516 header_names = [h for (h, v) in output_template]
1517 output = ' '.join([v for (h, v) in output_template]) + '\n'
1518 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1519 fm.plain(header % tuple(header_names))
1520
1521 if not revs:
1522 revs = ['all()']
1523 revs = scmutil.revrange(repo, revs)
1524
1525 roi = repo.revs('merge() and %ld', revs)
1526 for r in roi:
1527 ctx = repo[r]
1528 p1 = ctx.p1()
1529 p2 = ctx.p2()
1530 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1531 for b in bases:
1532 b = repo[b]
1533 p1missing = copies._computeforwardmissing(b, p1)
1534 p2missing = copies._computeforwardmissing(b, p2)
1535 data = {
1536 b'base': b.hex(),
1537 b'p1.node': p1.hex(),
1538 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1539 b'p1.nbmissingfiles': len(p1missing),
1540 b'p2.node': p2.hex(),
1541 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1542 b'p2.nbmissingfiles': len(p2missing),
1543 }
1544 if dotiming:
1545 begin = util.timer()
1546 mergedata = copies.mergecopies(repo, p1, p2, b)
1547 end = util.timer()
1548 # not very stable timing since we did only one run
1549 data['time'] = end - begin
1550 # mergedata contains five dicts: "copy", "movewithdir",
1551 # "diverge", "renamedelete" and "dirmove".
1552 # The first 4 are about renamed files, so let's count those.
1553 renames = len(mergedata[0])
1554 renames += len(mergedata[1])
1555 renames += len(mergedata[2])
1556 renames += len(mergedata[3])
1557 data['nbrenamedfiles'] = renames
1558 begin = util.timer()
1559 p1renames = copies.pathcopies(b, p1)
1560 end = util.timer()
1561 data['p1.time'] = end - begin
1562 begin = util.timer()
1563 p2renames = copies.pathcopies(b, p2)
1564 end = util.timer()
1565 data['p2.time'] = end - begin
1566 data['p1.renamedfiles'] = len(p1renames)
1567 data['p2.renamedfiles'] = len(p2renames)
1568 fm.startitem()
1569 fm.data(**data)
1570 # make node pretty for the human output
1571 out = data.copy()
1572 out['base'] = fm.hexfunc(b.node())
1573 out['p1.node'] = fm.hexfunc(p1.node())
1574 out['p2.node'] = fm.hexfunc(p2.node())
1575 fm.plain(output % out)
1576
1577 fm.end()
1578
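# Example: list (base, p1, p2) candidates among recent merges with the (slow)
# timing columns enabled; a reported triplet can then be replayed with
# `hg perfmergecopies --base BASE --from P1 --rev P2`. The revset is
# illustrative:
#
#   $ hg perfhelper-mergecopies --revs 'last(merge(), 20)' --timing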
1474 @command(b'perfhelper-pathcopies', formatteropts +
1579 @command(b'perfhelper-pathcopies', formatteropts +
1475 [
1580 [
1476 (b'r', b'revs', [], b'restrict search to these revisions'),
1581 (b'r', b'revs', [], b'restrict search to these revisions'),
1477 (b'', b'timing', False, b'provides extra data (costly)'),
1582 (b'', b'timing', False, b'provides extra data (costly)'),
1478 ])
1583 ])
1479 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1584 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1480 """find statistics about potential parameters for the `perftracecopies` command
1585 """find statistics about potential parameters for the `perftracecopies` command
1481
1586
1482 This command finds source-destination pairs relevant for copytracing testing.
1587 This command finds source-destination pairs relevant for copytracing testing.
1483 It reports values for some of the parameters that impact copy tracing time.
1588 It reports values for some of the parameters that impact copy tracing time.
1484
1589
1485 If `--timing` is set, rename detection is run and the associated timing
1590 If `--timing` is set, rename detection is run and the associated timing
1486 will be reported. The extra details come at the cost of a slower command
1591 will be reported. The extra details come at the cost of a slower command
1487 execution.
1592 execution.
1488
1593
1489 Since the rename detection is only run once, other factors might easily
1594 Since the rename detection is only run once, other factors might easily
1490 affect the precision of the timing. However it should give a good
1595 affect the precision of the timing. However it should give a good
1491 approximation of which revision pairs are very costly.
1596 approximation of which revision pairs are very costly.
1492 """
1597 """
1493 opts = _byteskwargs(opts)
1598 opts = _byteskwargs(opts)
1494 fm = ui.formatter(b'perf', opts)
1599 fm = ui.formatter(b'perf', opts)
1495 dotiming = opts[b'timing']
1600 dotiming = opts[b'timing']
1496
1601
1497 if dotiming:
1602 if dotiming:
1498 header = '%12s %12s %12s %12s %12s %12s\n'
1603 header = '%12s %12s %12s %12s %12s %12s\n'
1499 output = ("%(source)12s %(destination)12s "
1604 output = ("%(source)12s %(destination)12s "
1500 "%(nbrevs)12d %(nbmissingfiles)12d "
1605 "%(nbrevs)12d %(nbmissingfiles)12d "
1501 "%(nbrenamedfiles)12d %(time)18.5f\n")
1606 "%(nbrenamedfiles)12d %(time)18.5f\n")
1502 header_names = ("source", "destination", "nb-revs", "nb-files",
1607 header_names = ("source", "destination", "nb-revs", "nb-files",
1503 "nb-renames", "time")
1608 "nb-renames", "time")
1504 fm.plain(header % header_names)
1609 fm.plain(header % header_names)
1505 else:
1610 else:
1506 header = '%12s %12s %12s %12s\n'
1611 header = '%12s %12s %12s %12s\n'
1507 output = ("%(source)12s %(destination)12s "
1612 output = ("%(source)12s %(destination)12s "
1508 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1613 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1509 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1614 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1510
1615
1511 if not revs:
1616 if not revs:
1512 revs = ['all()']
1617 revs = ['all()']
1513 revs = scmutil.revrange(repo, revs)
1618 revs = scmutil.revrange(repo, revs)
1514
1619
1515 roi = repo.revs('merge() and %ld', revs)
1620 roi = repo.revs('merge() and %ld', revs)
1516 for r in roi:
1621 for r in roi:
1517 ctx = repo[r]
1622 ctx = repo[r]
1518 p1 = ctx.p1().rev()
1623 p1 = ctx.p1().rev()
1519 p2 = ctx.p2().rev()
1624 p2 = ctx.p2().rev()
1520 bases = repo.changelog._commonancestorsheads(p1, p2)
1625 bases = repo.changelog._commonancestorsheads(p1, p2)
1521 for p in (p1, p2):
1626 for p in (p1, p2):
1522 for b in bases:
1627 for b in bases:
1523 base = repo[b]
1628 base = repo[b]
1524 parent = repo[p]
1629 parent = repo[p]
1525 missing = copies._computeforwardmissing(base, parent)
1630 missing = copies._computeforwardmissing(base, parent)
1526 if not missing:
1631 if not missing:
1527 continue
1632 continue
1528 data = {
1633 data = {
1529 b'source': base.hex(),
1634 b'source': base.hex(),
1530 b'destination': parent.hex(),
1635 b'destination': parent.hex(),
1531 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1636 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1532 b'nbmissingfiles': len(missing),
1637 b'nbmissingfiles': len(missing),
1533 }
1638 }
1534 if dotiming:
1639 if dotiming:
1535 begin = util.timer()
1640 begin = util.timer()
1536 renames = copies.pathcopies(base, parent)
1641 renames = copies.pathcopies(base, parent)
1537 end = util.timer()
1642 end = util.timer()
1538 # not very stable timing since we did only one run
1643 # not very stable timing since we did only one run
1539 data['time'] = end - begin
1644 data['time'] = end - begin
1540 data['nbrenamedfiles'] = len(renames)
1645 data['nbrenamedfiles'] = len(renames)
1541 fm.startitem()
1646 fm.startitem()
1542 fm.data(**data)
1647 fm.data(**data)
1543 out = data.copy()
1648 out = data.copy()
1544 out['source'] = fm.hexfunc(base.node())
1649 out['source'] = fm.hexfunc(base.node())
1545 out['destination'] = fm.hexfunc(parent.node())
1650 out['destination'] = fm.hexfunc(parent.node())
1546 fm.plain(output % out)
1651 fm.plain(output % out)
1547
1652
1548 fm.end()
1653 fm.end()
1549
1654
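# Example: look for costly (source, destination) pairs among recent merges;
# a reported pair can then be replayed with `hg perfpathcopies SOURCE DEST`.
# The revset is illustrative:
#
#   $ hg perfhelper-pathcopies --revs '-1000:' --timing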
1550 @command(b'perfcca', formatteropts)
1655 @command(b'perfcca', formatteropts)
1551 def perfcca(ui, repo, **opts):
1656 def perfcca(ui, repo, **opts):
1552 opts = _byteskwargs(opts)
1657 opts = _byteskwargs(opts)
1553 timer, fm = gettimer(ui, opts)
1658 timer, fm = gettimer(ui, opts)
1554 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1659 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1555 fm.end()
1660 fm.end()
1556
1661
1557 @command(b'perffncacheload', formatteropts)
1662 @command(b'perffncacheload', formatteropts)
1558 def perffncacheload(ui, repo, **opts):
1663 def perffncacheload(ui, repo, **opts):
1559 opts = _byteskwargs(opts)
1664 opts = _byteskwargs(opts)
1560 timer, fm = gettimer(ui, opts)
1665 timer, fm = gettimer(ui, opts)
1561 s = repo.store
1666 s = repo.store
1562 def d():
1667 def d():
1563 s.fncache._load()
1668 s.fncache._load()
1564 timer(d)
1669 timer(d)
1565 fm.end()
1670 fm.end()
1566
1671
1567 @command(b'perffncachewrite', formatteropts)
1672 @command(b'perffncachewrite', formatteropts)
1568 def perffncachewrite(ui, repo, **opts):
1673 def perffncachewrite(ui, repo, **opts):
1569 opts = _byteskwargs(opts)
1674 opts = _byteskwargs(opts)
1570 timer, fm = gettimer(ui, opts)
1675 timer, fm = gettimer(ui, opts)
1571 s = repo.store
1676 s = repo.store
1572 lock = repo.lock()
1677 lock = repo.lock()
1573 s.fncache._load()
1678 s.fncache._load()
1574 tr = repo.transaction(b'perffncachewrite')
1679 tr = repo.transaction(b'perffncachewrite')
1575 tr.addbackup(b'fncache')
1680 tr.addbackup(b'fncache')
1576 def d():
1681 def d():
1577 s.fncache._dirty = True
1682 s.fncache._dirty = True
1578 s.fncache.write(tr)
1683 s.fncache.write(tr)
1579 timer(d)
1684 timer(d)
1580 tr.close()
1685 tr.close()
1581 lock.release()
1686 lock.release()
1582 fm.end()
1687 fm.end()
1583
1688
1584 @command(b'perffncacheencode', formatteropts)
1689 @command(b'perffncacheencode', formatteropts)
1585 def perffncacheencode(ui, repo, **opts):
1690 def perffncacheencode(ui, repo, **opts):
1586 opts = _byteskwargs(opts)
1691 opts = _byteskwargs(opts)
1587 timer, fm = gettimer(ui, opts)
1692 timer, fm = gettimer(ui, opts)
1588 s = repo.store
1693 s = repo.store
1589 s.fncache._load()
1694 s.fncache._load()
1590 def d():
1695 def d():
1591 for p in s.fncache.entries:
1696 for p in s.fncache.entries:
1592 s.encode(p)
1697 s.encode(p)
1593 timer(d)
1698 timer(d)
1594 fm.end()
1699 fm.end()
1595
1700
1596 def _bdiffworker(q, blocks, xdiff, ready, done):
1701 def _bdiffworker(q, blocks, xdiff, ready, done):
1597 while not done.is_set():
1702 while not done.is_set():
1598 pair = q.get()
1703 pair = q.get()
1599 while pair is not None:
1704 while pair is not None:
1600 if xdiff:
1705 if xdiff:
1601 mdiff.bdiff.xdiffblocks(*pair)
1706 mdiff.bdiff.xdiffblocks(*pair)
1602 elif blocks:
1707 elif blocks:
1603 mdiff.bdiff.blocks(*pair)
1708 mdiff.bdiff.blocks(*pair)
1604 else:
1709 else:
1605 mdiff.textdiff(*pair)
1710 mdiff.textdiff(*pair)
1606 q.task_done()
1711 q.task_done()
1607 pair = q.get()
1712 pair = q.get()
1608 q.task_done() # for the None one
1713 q.task_done() # for the None one
1609 with ready:
1714 with ready:
1610 ready.wait()
1715 ready.wait()
1611
1716
1612 def _manifestrevision(repo, mnode):
1717 def _manifestrevision(repo, mnode):
1613 ml = repo.manifestlog
1718 ml = repo.manifestlog
1614
1719
1615 if util.safehasattr(ml, b'getstorage'):
1720 if util.safehasattr(ml, b'getstorage'):
1616 store = ml.getstorage(b'')
1721 store = ml.getstorage(b'')
1617 else:
1722 else:
1618 store = ml._revlog
1723 store = ml._revlog
1619
1724
1620 return store.revision(mnode)
1725 return store.revision(mnode)
1621
1726
1622 @command(b'perfbdiff', revlogopts + formatteropts + [
1727 @command(b'perfbdiff', revlogopts + formatteropts + [
1623 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1728 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1624 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1729 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1625 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1730 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1626 (b'', b'blocks', False, b'test computing diffs into blocks'),
1731 (b'', b'blocks', False, b'test computing diffs into blocks'),
1627 (b'', b'xdiff', False, b'use xdiff algorithm'),
1732 (b'', b'xdiff', False, b'use xdiff algorithm'),
1628 ],
1733 ],
1629
1734
1630 b'-c|-m|FILE REV')
1735 b'-c|-m|FILE REV')
1631 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1736 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1632 """benchmark a bdiff between revisions
1737 """benchmark a bdiff between revisions
1633
1738
1634 By default, benchmark a bdiff between its delta parent and itself.
1739 By default, benchmark a bdiff between its delta parent and itself.
1635
1740
1636 With ``--count``, benchmark bdiffs between delta parents and self for N
1741 With ``--count``, benchmark bdiffs between delta parents and self for N
1637 revisions starting at the specified revision.
1742 revisions starting at the specified revision.
1638
1743
1639 With ``--alldata``, assume the requested revision is a changeset and
1744 With ``--alldata``, assume the requested revision is a changeset and
1640 measure bdiffs for all changes related to that changeset (manifest
1745 measure bdiffs for all changes related to that changeset (manifest
1641 and filelogs).
1746 and filelogs).
1642 """
1747 """
1643 opts = _byteskwargs(opts)
1748 opts = _byteskwargs(opts)
1644
1749
1645 if opts[b'xdiff'] and not opts[b'blocks']:
1750 if opts[b'xdiff'] and not opts[b'blocks']:
1646 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1751 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1647
1752
1648 if opts[b'alldata']:
1753 if opts[b'alldata']:
1649 opts[b'changelog'] = True
1754 opts[b'changelog'] = True
1650
1755
1651 if opts.get(b'changelog') or opts.get(b'manifest'):
1756 if opts.get(b'changelog') or opts.get(b'manifest'):
1652 file_, rev = None, file_
1757 file_, rev = None, file_
1653 elif rev is None:
1758 elif rev is None:
1654 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1759 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1655
1760
1656 blocks = opts[b'blocks']
1761 blocks = opts[b'blocks']
1657 xdiff = opts[b'xdiff']
1762 xdiff = opts[b'xdiff']
1658 textpairs = []
1763 textpairs = []
1659
1764
1660 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1765 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1661
1766
1662 startrev = r.rev(r.lookup(rev))
1767 startrev = r.rev(r.lookup(rev))
1663 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1768 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1664 if opts[b'alldata']:
1769 if opts[b'alldata']:
1665 # Load revisions associated with changeset.
1770 # Load revisions associated with changeset.
1666 ctx = repo[rev]
1771 ctx = repo[rev]
1667 mtext = _manifestrevision(repo, ctx.manifestnode())
1772 mtext = _manifestrevision(repo, ctx.manifestnode())
1668 for pctx in ctx.parents():
1773 for pctx in ctx.parents():
1669 pman = _manifestrevision(repo, pctx.manifestnode())
1774 pman = _manifestrevision(repo, pctx.manifestnode())
1670 textpairs.append((pman, mtext))
1775 textpairs.append((pman, mtext))
1671
1776
1672 # Load filelog revisions by iterating manifest delta.
1777 # Load filelog revisions by iterating manifest delta.
1673 man = ctx.manifest()
1778 man = ctx.manifest()
1674 pman = ctx.p1().manifest()
1779 pman = ctx.p1().manifest()
1675 for filename, change in pman.diff(man).items():
1780 for filename, change in pman.diff(man).items():
1676 fctx = repo.file(filename)
1781 fctx = repo.file(filename)
1677 f1 = fctx.revision(change[0][0] or -1)
1782 f1 = fctx.revision(change[0][0] or -1)
1678 f2 = fctx.revision(change[1][0] or -1)
1783 f2 = fctx.revision(change[1][0] or -1)
1679 textpairs.append((f1, f2))
1784 textpairs.append((f1, f2))
1680 else:
1785 else:
1681 dp = r.deltaparent(rev)
1786 dp = r.deltaparent(rev)
1682 textpairs.append((r.revision(dp), r.revision(rev)))
1787 textpairs.append((r.revision(dp), r.revision(rev)))
1683
1788
1684 withthreads = threads > 0
1789 withthreads = threads > 0
1685 if not withthreads:
1790 if not withthreads:
1686 def d():
1791 def d():
1687 for pair in textpairs:
1792 for pair in textpairs:
1688 if xdiff:
1793 if xdiff:
1689 mdiff.bdiff.xdiffblocks(*pair)
1794 mdiff.bdiff.xdiffblocks(*pair)
1690 elif blocks:
1795 elif blocks:
1691 mdiff.bdiff.blocks(*pair)
1796 mdiff.bdiff.blocks(*pair)
1692 else:
1797 else:
1693 mdiff.textdiff(*pair)
1798 mdiff.textdiff(*pair)
1694 else:
1799 else:
1695 q = queue()
1800 q = queue()
1696 for i in _xrange(threads):
1801 for i in _xrange(threads):
1697 q.put(None)
1802 q.put(None)
1698 ready = threading.Condition()
1803 ready = threading.Condition()
1699 done = threading.Event()
1804 done = threading.Event()
1700 for i in _xrange(threads):
1805 for i in _xrange(threads):
1701 threading.Thread(target=_bdiffworker,
1806 threading.Thread(target=_bdiffworker,
1702 args=(q, blocks, xdiff, ready, done)).start()
1807 args=(q, blocks, xdiff, ready, done)).start()
1703 q.join()
1808 q.join()
1704 def d():
1809 def d():
1705 for pair in textpairs:
1810 for pair in textpairs:
1706 q.put(pair)
1811 q.put(pair)
1707 for i in _xrange(threads):
1812 for i in _xrange(threads):
1708 q.put(None)
1813 q.put(None)
1709 with ready:
1814 with ready:
1710 ready.notify_all()
1815 ready.notify_all()
1711 q.join()
1816 q.join()
1712 timer, fm = gettimer(ui, opts)
1817 timer, fm = gettimer(ui, opts)
1713 timer(d)
1818 timer(d)
1714 fm.end()
1819 fm.end()
1715
1820
1716 if withthreads:
1821 if withthreads:
1717 done.set()
1822 done.set()
1718 for i in _xrange(threads):
1823 for i in _xrange(threads):
1719 q.put(None)
1824 q.put(None)
1720 with ready:
1825 with ready:
1721 ready.notify_all()
1826 ready.notify_all()
1722
1827
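# Example invocations (FILE and REV are placeholders). With --alldata the
# positional argument is interpreted as a changeset revision rather than a
# file:
#
#   $ hg perfbdiff FILE REV
#   $ hg perfbdiff FILE REV --count 50 --threads 4
#   $ hg perfbdiff --alldata REV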
1723 @command(b'perfunidiff', revlogopts + formatteropts + [
1828 @command(b'perfunidiff', revlogopts + formatteropts + [
1724 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1829 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1725 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1830 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1726 ], b'-c|-m|FILE REV')
1831 ], b'-c|-m|FILE REV')
1727 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1832 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1728 """benchmark a unified diff between revisions
1833 """benchmark a unified diff between revisions
1729
1834
1730 This doesn't include any copy tracing - it's just a unified diff
1835 This doesn't include any copy tracing - it's just a unified diff
1731 of the texts.
1836 of the texts.
1732
1837
1733 By default, benchmark a diff between its delta parent and itself.
1838 By default, benchmark a diff between its delta parent and itself.
1734
1839
1735 With ``--count``, benchmark diffs between delta parents and self for N
1840 With ``--count``, benchmark diffs between delta parents and self for N
1736 revisions starting at the specified revision.
1841 revisions starting at the specified revision.
1737
1842
1738 With ``--alldata``, assume the requested revision is a changeset and
1843 With ``--alldata``, assume the requested revision is a changeset and
1739 measure diffs for all changes related to that changeset (manifest
1844 measure diffs for all changes related to that changeset (manifest
1740 and filelogs).
1845 and filelogs).
1741 """
1846 """
1742 opts = _byteskwargs(opts)
1847 opts = _byteskwargs(opts)
1743 if opts[b'alldata']:
1848 if opts[b'alldata']:
1744 opts[b'changelog'] = True
1849 opts[b'changelog'] = True
1745
1850
1746 if opts.get(b'changelog') or opts.get(b'manifest'):
1851 if opts.get(b'changelog') or opts.get(b'manifest'):
1747 file_, rev = None, file_
1852 file_, rev = None, file_
1748 elif rev is None:
1853 elif rev is None:
1749 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1854 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1750
1855
1751 textpairs = []
1856 textpairs = []
1752
1857
1753 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1858 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1754
1859
1755 startrev = r.rev(r.lookup(rev))
1860 startrev = r.rev(r.lookup(rev))
1756 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1861 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1757 if opts[b'alldata']:
1862 if opts[b'alldata']:
1758 # Load revisions associated with changeset.
1863 # Load revisions associated with changeset.
1759 ctx = repo[rev]
1864 ctx = repo[rev]
1760 mtext = _manifestrevision(repo, ctx.manifestnode())
1865 mtext = _manifestrevision(repo, ctx.manifestnode())
1761 for pctx in ctx.parents():
1866 for pctx in ctx.parents():
1762 pman = _manifestrevision(repo, pctx.manifestnode())
1867 pman = _manifestrevision(repo, pctx.manifestnode())
1763 textpairs.append((pman, mtext))
1868 textpairs.append((pman, mtext))
1764
1869
1765 # Load filelog revisions by iterating manifest delta.
1870 # Load filelog revisions by iterating manifest delta.
1766 man = ctx.manifest()
1871 man = ctx.manifest()
1767 pman = ctx.p1().manifest()
1872 pman = ctx.p1().manifest()
1768 for filename, change in pman.diff(man).items():
1873 for filename, change in pman.diff(man).items():
1769 fctx = repo.file(filename)
1874 fctx = repo.file(filename)
1770 f1 = fctx.revision(change[0][0] or -1)
1875 f1 = fctx.revision(change[0][0] or -1)
1771 f2 = fctx.revision(change[1][0] or -1)
1876 f2 = fctx.revision(change[1][0] or -1)
1772 textpairs.append((f1, f2))
1877 textpairs.append((f1, f2))
1773 else:
1878 else:
1774 dp = r.deltaparent(rev)
1879 dp = r.deltaparent(rev)
1775 textpairs.append((r.revision(dp), r.revision(rev)))
1880 textpairs.append((r.revision(dp), r.revision(rev)))
1776
1881
1777 def d():
1882 def d():
1778 for left, right in textpairs:
1883 for left, right in textpairs:
1779 # The date strings don't matter, so we pass empty strings.
1884 # The date strings don't matter, so we pass empty strings.
1780 headerlines, hunks = mdiff.unidiff(
1885 headerlines, hunks = mdiff.unidiff(
1781 left, b'', right, b'', b'left', b'right', binary=False)
1886 left, b'', right, b'', b'left', b'right', binary=False)
1782 # consume iterators in roughly the way patch.py does
1887 # consume iterators in roughly the way patch.py does
1783 b'\n'.join(headerlines)
1888 b'\n'.join(headerlines)
1784 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1889 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1785 timer, fm = gettimer(ui, opts)
1890 timer, fm = gettimer(ui, opts)
1786 timer(d)
1891 timer(d)
1787 fm.end()
1892 fm.end()
1788
1893
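# A minimal sketch of how the mdiff.unidiff() output consumed in perfunidiff
# above can be materialized; `old` and `new` are hypothetical byte strings and
# the b'left'/b'right' labels are illustrative only.
def _unidiff_sketch(old, new):
    headerlines, hunks = mdiff.unidiff(
        old, b'', new, b'', b'left', b'right', binary=False)
    header = b'\n'.join(headerlines)
    # flatten the per-hunk line iterators, roughly as patch.py does
    body = b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    return header, body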
1789 @command(b'perfdiffwd', formatteropts)
1894 @command(b'perfdiffwd', formatteropts)
1790 def perfdiffwd(ui, repo, **opts):
1895 def perfdiffwd(ui, repo, **opts):
1791 """Profile diff of working directory changes"""
1896 """Profile diff of working directory changes"""
1792 opts = _byteskwargs(opts)
1897 opts = _byteskwargs(opts)
1793 timer, fm = gettimer(ui, opts)
1898 timer, fm = gettimer(ui, opts)
1794 options = {
1899 options = {
1795 'w': 'ignore_all_space',
1900 'w': 'ignore_all_space',
1796 'b': 'ignore_space_change',
1901 'b': 'ignore_space_change',
1797 'B': 'ignore_blank_lines',
1902 'B': 'ignore_blank_lines',
1798 }
1903 }
1799
1904
1800 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1905 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1801 opts = dict((options[c], b'1') for c in diffopt)
1906 opts = dict((options[c], b'1') for c in diffopt)
1802 def d():
1907 def d():
1803 ui.pushbuffer()
1908 ui.pushbuffer()
1804 commands.diff(ui, repo, **opts)
1909 commands.diff(ui, repo, **opts)
1805 ui.popbuffer()
1910 ui.popbuffer()
1806 diffopt = diffopt.encode('ascii')
1911 diffopt = diffopt.encode('ascii')
1807 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1912 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1808 timer(d, title=title)
1913 timer(d, title=title)
1809 fm.end()
1914 fm.end()
1810
1915
1811 @command(b'perfrevlogindex', revlogopts + formatteropts,
1916 @command(b'perfrevlogindex', revlogopts + formatteropts,
1812 b'-c|-m|FILE')
1917 b'-c|-m|FILE')
1813 def perfrevlogindex(ui, repo, file_=None, **opts):
1918 def perfrevlogindex(ui, repo, file_=None, **opts):
1814 """Benchmark operations against a revlog index.
1919 """Benchmark operations against a revlog index.
1815
1920
1816 This tests constructing a revlog instance, reading index data,
1921 This tests constructing a revlog instance, reading index data,
1817 parsing index data, and performing various operations related to
1922 parsing index data, and performing various operations related to
1818 index data.
1923 index data.
1819 """
1924 """
1820
1925
1821 opts = _byteskwargs(opts)
1926 opts = _byteskwargs(opts)
1822
1927
1823 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1928 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1824
1929
1825 opener = getattr(rl, 'opener') # trick linter
1930 opener = getattr(rl, 'opener') # trick linter
1826 indexfile = rl.indexfile
1931 indexfile = rl.indexfile
1827 data = opener.read(indexfile)
1932 data = opener.read(indexfile)
1828
1933
1829 header = struct.unpack(b'>I', data[0:4])[0]
1934 header = struct.unpack(b'>I', data[0:4])[0]
1830 version = header & 0xFFFF
1935 version = header & 0xFFFF
1831 if version == 1:
1936 if version == 1:
1832 revlogio = revlog.revlogio()
1937 revlogio = revlog.revlogio()
1833 inline = header & (1 << 16)
1938 inline = header & (1 << 16)
1834 else:
1939 else:
1835 raise error.Abort((b'unsupported revlog version: %d') % version)
1940 raise error.Abort((b'unsupported revlog version: %d') % version)
1836
1941
1837 rllen = len(rl)
1942 rllen = len(rl)
1838
1943
1839 node0 = rl.node(0)
1944 node0 = rl.node(0)
1840 node25 = rl.node(rllen // 4)
1945 node25 = rl.node(rllen // 4)
1841 node50 = rl.node(rllen // 2)
1946 node50 = rl.node(rllen // 2)
1842 node75 = rl.node(rllen // 4 * 3)
1947 node75 = rl.node(rllen // 4 * 3)
1843 node100 = rl.node(rllen - 1)
1948 node100 = rl.node(rllen - 1)
1844
1949
1845 allrevs = range(rllen)
1950 allrevs = range(rllen)
1846 allrevsrev = list(reversed(allrevs))
1951 allrevsrev = list(reversed(allrevs))
1847 allnodes = [rl.node(rev) for rev in range(rllen)]
1952 allnodes = [rl.node(rev) for rev in range(rllen)]
1848 allnodesrev = list(reversed(allnodes))
1953 allnodesrev = list(reversed(allnodes))
1849
1954
1850 def constructor():
1955 def constructor():
1851 revlog.revlog(opener, indexfile)
1956 revlog.revlog(opener, indexfile)
1852
1957
1853 def read():
1958 def read():
1854 with opener(indexfile) as fh:
1959 with opener(indexfile) as fh:
1855 fh.read()
1960 fh.read()
1856
1961
1857 def parseindex():
1962 def parseindex():
1858 revlogio.parseindex(data, inline)
1963 revlogio.parseindex(data, inline)
1859
1964
1860 def getentry(revornode):
1965 def getentry(revornode):
1861 index = revlogio.parseindex(data, inline)[0]
1966 index = revlogio.parseindex(data, inline)[0]
1862 index[revornode]
1967 index[revornode]
1863
1968
1864 def getentries(revs, count=1):
1969 def getentries(revs, count=1):
1865 index = revlogio.parseindex(data, inline)[0]
1970 index = revlogio.parseindex(data, inline)[0]
1866
1971
1867 for i in range(count):
1972 for i in range(count):
1868 for rev in revs:
1973 for rev in revs:
1869 index[rev]
1974 index[rev]
1870
1975
1871 def resolvenode(node):
1976 def resolvenode(node):
1872 nodemap = revlogio.parseindex(data, inline)[1]
1977 nodemap = revlogio.parseindex(data, inline)[1]
1873 # This only works for the C code.
1978 # This only works for the C code.
1874 if nodemap is None:
1979 if nodemap is None:
1875 return
1980 return
1876
1981
1877 try:
1982 try:
1878 nodemap[node]
1983 nodemap[node]
1879 except error.RevlogError:
1984 except error.RevlogError:
1880 pass
1985 pass
1881
1986
1882 def resolvenodes(nodes, count=1):
1987 def resolvenodes(nodes, count=1):
1883 nodemap = revlogio.parseindex(data, inline)[1]
1988 nodemap = revlogio.parseindex(data, inline)[1]
1884 if nodemap is None:
1989 if nodemap is None:
1885 return
1990 return
1886
1991
1887 for i in range(count):
1992 for i in range(count):
1888 for node in nodes:
1993 for node in nodes:
1889 try:
1994 try:
1890 nodemap[node]
1995 nodemap[node]
1891 except error.RevlogError:
1996 except error.RevlogError:
1892 pass
1997 pass
1893
1998
1894 benches = [
1999 benches = [
1895 (constructor, b'revlog constructor'),
2000 (constructor, b'revlog constructor'),
1896 (read, b'read'),
2001 (read, b'read'),
1897 (parseindex, b'create index object'),
2002 (parseindex, b'create index object'),
1898 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2003 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1899 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2004 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1900 (lambda: resolvenode(node0), b'look up node at rev 0'),
2005 (lambda: resolvenode(node0), b'look up node at rev 0'),
1901 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2006 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1902 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2007 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1903 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2008 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1904 (lambda: resolvenode(node100), b'look up node at tip'),
2009 (lambda: resolvenode(node100), b'look up node at tip'),
1905 # 2x variation is to measure caching impact.
2010 # 2x variation is to measure caching impact.
1906 (lambda: resolvenodes(allnodes),
2011 (lambda: resolvenodes(allnodes),
1907 b'look up all nodes (forward)'),
2012 b'look up all nodes (forward)'),
1908 (lambda: resolvenodes(allnodes, 2),
2013 (lambda: resolvenodes(allnodes, 2),
1909 b'look up all nodes 2x (forward)'),
2014 b'look up all nodes 2x (forward)'),
1910 (lambda: resolvenodes(allnodesrev),
2015 (lambda: resolvenodes(allnodesrev),
1911 b'look up all nodes (reverse)'),
2016 b'look up all nodes (reverse)'),
1912 (lambda: resolvenodes(allnodesrev, 2),
2017 (lambda: resolvenodes(allnodesrev, 2),
1913 b'look up all nodes 2x (reverse)'),
2018 b'look up all nodes 2x (reverse)'),
1914 (lambda: getentries(allrevs),
2019 (lambda: getentries(allrevs),
1915 b'retrieve all index entries (forward)'),
2020 b'retrieve all index entries (forward)'),
1916 (lambda: getentries(allrevs, 2),
2021 (lambda: getentries(allrevs, 2),
1917 b'retrieve all index entries 2x (forward)'),
2022 b'retrieve all index entries 2x (forward)'),
1918 (lambda: getentries(allrevsrev),
2023 (lambda: getentries(allrevsrev),
1919 b'retrieve all index entries (reverse)'),
2024 b'retrieve all index entries (reverse)'),
1920 (lambda: getentries(allrevsrev, 2),
2025 (lambda: getentries(allrevsrev, 2),
1921 b'retrieve all index entries 2x (reverse)'),
2026 b'retrieve all index entries 2x (reverse)'),
1922 ]
2027 ]
1923
2028
1924 for fn, title in benches:
2029 for fn, title in benches:
1925 timer, fm = gettimer(ui, opts)
2030 timer, fm = gettimer(ui, opts)
1926 timer(fn, title=title)
2031 timer(fn, title=title)
1927 fm.end()
2032 fm.end()
1928
2033
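# A small sketch of the index header decoding used in perfrevlogindex above:
# the first four bytes of a revlog index store the format version in the low
# 16 bits and feature flags (such as the inline flag) in the higher bits.
# `data` is assumed to be the raw index bytes read through the opener.
def _parse_index_header(data):
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    inline = bool(header & (1 << 16))
    return version, inline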
1929 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
2034 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1930 [(b'd', b'dist', 100, b'distance between the revisions'),
2035 [(b'd', b'dist', 100, b'distance between the revisions'),
1931 (b's', b'startrev', 0, b'revision to start reading at'),
2036 (b's', b'startrev', 0, b'revision to start reading at'),
1932 (b'', b'reverse', False, b'read in reverse')],
2037 (b'', b'reverse', False, b'read in reverse')],
1933 b'-c|-m|FILE')
2038 b'-c|-m|FILE')
1934 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
2039 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1935 **opts):
2040 **opts):
1936 """Benchmark reading a series of revisions from a revlog.
2041 """Benchmark reading a series of revisions from a revlog.
1937
2042
1938 By default, we read every ``-d/--dist`` revision from 0 to tip of
2043 By default, we read every ``-d/--dist`` revision from 0 to tip of
1939 the specified revlog.
2044 the specified revlog.
1940
2045
1941 The start revision can be defined via ``-s/--startrev``.
2046 The start revision can be defined via ``-s/--startrev``.
1942 """
2047 """
1943 opts = _byteskwargs(opts)
2048 opts = _byteskwargs(opts)
1944
2049
1945 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2050 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1946 rllen = getlen(ui)(rl)
2051 rllen = getlen(ui)(rl)
1947
2052
1948 if startrev < 0:
2053 if startrev < 0:
1949 startrev = rllen + startrev
2054 startrev = rllen + startrev
1950
2055
1951 def d():
2056 def d():
1952 rl.clearcaches()
2057 rl.clearcaches()
1953
2058
1954 beginrev = startrev
2059 beginrev = startrev
1955 endrev = rllen
2060 endrev = rllen
1956 dist = opts[b'dist']
2061 dist = opts[b'dist']
1957
2062
1958 if reverse:
2063 if reverse:
1959 beginrev, endrev = endrev - 1, beginrev - 1
2064 beginrev, endrev = endrev - 1, beginrev - 1
1960 dist = -1 * dist
2065 dist = -1 * dist
1961
2066
1962 for x in _xrange(beginrev, endrev, dist):
2067 for x in _xrange(beginrev, endrev, dist):
1963 # Old Mercurial versions don't support passing an int to revision().
2068 # Old Mercurial versions don't support passing an int to revision().
1964 n = rl.node(x)
2069 n = rl.node(x)
1965 rl.revision(n)
2070 rl.revision(n)
1966
2071
1967 timer, fm = gettimer(ui, opts)
2072 timer, fm = gettimer(ui, opts)
1968 timer(d)
2073 timer(d)
1969 fm.end()
2074 fm.end()
1970
2075
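# Example invocations (revlog targets and values are illustrative):
#   $ hg perfrevlogrevisions -c --dist 1000
#   $ hg perfrevlogrevisions -m --startrev -1000 --reverse
#
# A stripped-down sketch of the read loop timed above, assuming `rl` is an
# already opened revlog; nodes are looked up first because old Mercurial
# versions only accept nodes in revision().
def _read_every_nth(rl, startrev, dist):
    for x in _xrange(startrev, len(rl), dist):
        rl.revision(rl.node(x))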
1971 @command(b'perfrevlogwrite', revlogopts + formatteropts +
2076 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1972 [(b's', b'startrev', 1000, b'revision to start writing at'),
2077 [(b's', b'startrev', 1000, b'revision to start writing at'),
1973 (b'', b'stoprev', -1, b'last revision to write'),
2078 (b'', b'stoprev', -1, b'last revision to write'),
1974 (b'', b'count', 3, b'number of runs to perform'),
2079 (b'', b'count', 3, b'number of runs to perform'),
1975 (b'', b'details', False, b'print timing for every revision tested'),
2080 (b'', b'details', False, b'print timing for every revision tested'),
1976 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
2081 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1977 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2082 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1978 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2083 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1979 ],
2084 ],
1980 b'-c|-m|FILE')
2085 b'-c|-m|FILE')
1981 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2086 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1982 """Benchmark writing a series of revisions to a revlog.
2087 """Benchmark writing a series of revisions to a revlog.
1983
2088
1984 Possible source values are:
2089 Possible source values are:
1985 * `full`: add from a full text (default).
2090 * `full`: add from a full text (default).
1986 * `parent-1`: add from a delta to the first parent
2091 * `parent-1`: add from a delta to the first parent
1987 * `parent-2`: add from a delta to the second parent if it exists
2092 * `parent-2`: add from a delta to the second parent if it exists
1988 (use a delta from the first parent otherwise)
2093 (use a delta from the first parent otherwise)
1989 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2094 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1990 * `storage`: add from the existing precomputed deltas
2095 * `storage`: add from the existing precomputed deltas
1991 """
2096 """
1992 opts = _byteskwargs(opts)
2097 opts = _byteskwargs(opts)
1993
2098
1994 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2099 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1995 rllen = getlen(ui)(rl)
2100 rllen = getlen(ui)(rl)
1996 if startrev < 0:
2101 if startrev < 0:
1997 startrev = rllen + startrev
2102 startrev = rllen + startrev
1998 if stoprev < 0:
2103 if stoprev < 0:
1999 stoprev = rllen + stoprev
2104 stoprev = rllen + stoprev
2000
2105
2001 lazydeltabase = opts['lazydeltabase']
2106 lazydeltabase = opts['lazydeltabase']
2002 source = opts['source']
2107 source = opts['source']
2003 clearcaches = opts['clear_caches']
2108 clearcaches = opts['clear_caches']
2004 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
2109 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
2005 b'storage')
2110 b'storage')
2006 if source not in validsource:
2111 if source not in validsource:
2007 raise error.Abort('invalid source type: %s' % source)
2112 raise error.Abort('invalid source type: %s' % source)
2008
2113
2009 ### actually gather results
2114 ### actually gather results
2010 count = opts['count']
2115 count = opts['count']
2011 if count <= 0:
2116 if count <= 0:
2012 raise error.Abort('invalid run count: %d' % count)
2117 raise error.Abort('invalid run count: %d' % count)
2013 allresults = []
2118 allresults = []
2014 for c in range(count):
2119 for c in range(count):
2015 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
2120 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
2016 lazydeltabase=lazydeltabase,
2121 lazydeltabase=lazydeltabase,
2017 clearcaches=clearcaches)
2122 clearcaches=clearcaches)
2018 allresults.append(timing)
2123 allresults.append(timing)
2019
2124
2020 ### consolidate the results in a single list
2125 ### consolidate the results in a single list
2021 results = []
2126 results = []
2022 for idx, (rev, t) in enumerate(allresults[0]):
2127 for idx, (rev, t) in enumerate(allresults[0]):
2023 ts = [t]
2128 ts = [t]
2024 for other in allresults[1:]:
2129 for other in allresults[1:]:
2025 orev, ot = other[idx]
2130 orev, ot = other[idx]
2026 assert orev == rev
2131 assert orev == rev
2027 ts.append(ot)
2132 ts.append(ot)
2028 results.append((rev, ts))
2133 results.append((rev, ts))
2029 resultcount = len(results)
2134 resultcount = len(results)
2030
2135
2031 ### Compute and display relevant statistics
2136 ### Compute and display relevant statistics
2032
2137
2033 # get a formatter
2138 # get a formatter
2034 fm = ui.formatter(b'perf', opts)
2139 fm = ui.formatter(b'perf', opts)
2035 displayall = ui.configbool(b"perf", b"all-timing", False)
2140 displayall = ui.configbool(b"perf", b"all-timing", False)
2036
2141
2037 # print individual details if requested
2142 # print individual details if requested
2038 if opts['details']:
2143 if opts['details']:
2039 for idx, item in enumerate(results, 1):
2144 for idx, item in enumerate(results, 1):
2040 rev, data = item
2145 rev, data = item
2041 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2146 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2042 formatone(fm, data, title=title, displayall=displayall)
2147 formatone(fm, data, title=title, displayall=displayall)
2043
2148
2044 # sorts results by median time
2149 # sorts results by median time
2045 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2150 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2046 # list of (name, index) to display
2151 # list of (name, index) to display
2047 relevants = [
2152 relevants = [
2048 ("min", 0),
2153 ("min", 0),
2049 ("10%", resultcount * 10 // 100),
2154 ("10%", resultcount * 10 // 100),
2050 ("25%", resultcount * 25 // 100),
2155 ("25%", resultcount * 25 // 100),
2051 ("50%", resultcount * 70 // 100),
2156 ("50%", resultcount * 70 // 100),
2052 ("75%", resultcount * 75 // 100),
2157 ("75%", resultcount * 75 // 100),
2053 ("90%", resultcount * 90 // 100),
2158 ("90%", resultcount * 90 // 100),
2054 ("95%", resultcount * 95 // 100),
2159 ("95%", resultcount * 95 // 100),
2055 ("99%", resultcount * 99 // 100),
2160 ("99%", resultcount * 99 // 100),
2056 ("99.9%", resultcount * 999 // 1000),
2161 ("99.9%", resultcount * 999 // 1000),
2057 ("99.99%", resultcount * 9999 // 10000),
2162 ("99.99%", resultcount * 9999 // 10000),
2058 ("99.999%", resultcount * 99999 // 100000),
2163 ("99.999%", resultcount * 99999 // 100000),
2059 ("max", -1),
2164 ("max", -1),
2060 ]
2165 ]
2061 if not ui.quiet:
2166 if not ui.quiet:
2062 for name, idx in relevants:
2167 for name, idx in relevants:
2063 data = results[idx]
2168 data = results[idx]
2064 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2169 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2065 formatone(fm, data[1], title=title, displayall=displayall)
2170 formatone(fm, data[1], title=title, displayall=displayall)
2066
2171
2067 # XXX summing that many floats will not be very precise; we ignore this
2172 # XXX summing that many floats will not be very precise; we ignore this
2068 # fact for now
2173 # fact for now
2069 totaltime = []
2174 totaltime = []
2070 for item in allresults:
2175 for item in allresults:
2071 totaltime.append((sum(x[1][0] for x in item),
2176 totaltime.append((sum(x[1][0] for x in item),
2072 sum(x[1][1] for x in item),
2177 sum(x[1][1] for x in item),
2073 sum(x[1][2] for x in item),)
2178 sum(x[1][2] for x in item),)
2074 )
2179 )
2075 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2180 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2076 displayall=displayall)
2181 displayall=displayall)
2077 fm.end()
2182 fm.end()
2078
2183
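# The percentile table in perfrevlogwrite above indexes into the median-sorted
# results with plain integer arithmetic; a sketch of the same computation for
# an arbitrary percentile (names are illustrative):
def _percentile_index(resultcount, percent):
    # e.g. resultcount=200, percent=99 -> index 198 (the "max" entry simply
    # uses index -1 instead)
    return resultcount * percent // 100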
2079 class _faketr(object):
2184 class _faketr(object):
2080 def add(s, x, y, z=None):
2185 def add(s, x, y, z=None):
2081 return None
2186 return None
2082
2187
2083 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2188 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2084 lazydeltabase=True, clearcaches=True):
2189 lazydeltabase=True, clearcaches=True):
2085 timings = []
2190 timings = []
2086 tr = _faketr()
2191 tr = _faketr()
2087 with _temprevlog(ui, orig, startrev) as dest:
2192 with _temprevlog(ui, orig, startrev) as dest:
2088 dest._lazydeltabase = lazydeltabase
2193 dest._lazydeltabase = lazydeltabase
2089 revs = list(orig.revs(startrev, stoprev))
2194 revs = list(orig.revs(startrev, stoprev))
2090 total = len(revs)
2195 total = len(revs)
2091 topic = 'adding'
2196 topic = 'adding'
2092 if runidx is not None:
2197 if runidx is not None:
2093 topic += ' (run #%d)' % runidx
2198 topic += ' (run #%d)' % runidx
2094 # Support both old and new progress API
2199 # Support both old and new progress API
2095 if util.safehasattr(ui, 'makeprogress'):
2200 if util.safehasattr(ui, 'makeprogress'):
2096 progress = ui.makeprogress(topic, unit='revs', total=total)
2201 progress = ui.makeprogress(topic, unit='revs', total=total)
2097 def updateprogress(pos):
2202 def updateprogress(pos):
2098 progress.update(pos)
2203 progress.update(pos)
2099 def completeprogress():
2204 def completeprogress():
2100 progress.complete()
2205 progress.complete()
2101 else:
2206 else:
2102 def updateprogress(pos):
2207 def updateprogress(pos):
2103 ui.progress(topic, pos, unit='revs', total=total)
2208 ui.progress(topic, pos, unit='revs', total=total)
2104 def completeprogress():
2209 def completeprogress():
2105 ui.progress(topic, None, unit='revs', total=total)
2210 ui.progress(topic, None, unit='revs', total=total)
2106
2211
2107 for idx, rev in enumerate(revs):
2212 for idx, rev in enumerate(revs):
2108 updateprogress(idx)
2213 updateprogress(idx)
2109 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2214 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2110 if clearcaches:
2215 if clearcaches:
2111 dest.index.clearcaches()
2216 dest.index.clearcaches()
2112 dest.clearcaches()
2217 dest.clearcaches()
2113 with timeone() as r:
2218 with timeone() as r:
2114 dest.addrawrevision(*addargs, **addkwargs)
2219 dest.addrawrevision(*addargs, **addkwargs)
2115 timings.append((rev, r[0]))
2220 timings.append((rev, r[0]))
2116 updateprogress(total)
2221 updateprogress(total)
2117 completeprogress()
2222 completeprogress()
2118 return timings
2223 return timings
2119
2224
2120 def _getrevisionseed(orig, rev, tr, source):
2225 def _getrevisionseed(orig, rev, tr, source):
2121 from mercurial.node import nullid
2226 from mercurial.node import nullid
2122
2227
2123 linkrev = orig.linkrev(rev)
2228 linkrev = orig.linkrev(rev)
2124 node = orig.node(rev)
2229 node = orig.node(rev)
2125 p1, p2 = orig.parents(node)
2230 p1, p2 = orig.parents(node)
2126 flags = orig.flags(rev)
2231 flags = orig.flags(rev)
2127 cachedelta = None
2232 cachedelta = None
2128 text = None
2233 text = None
2129
2234
2130 if source == b'full':
2235 if source == b'full':
2131 text = orig.revision(rev)
2236 text = orig.revision(rev)
2132 elif source == b'parent-1':
2237 elif source == b'parent-1':
2133 baserev = orig.rev(p1)
2238 baserev = orig.rev(p1)
2134 cachedelta = (baserev, orig.revdiff(p1, rev))
2239 cachedelta = (baserev, orig.revdiff(p1, rev))
2135 elif source == b'parent-2':
2240 elif source == b'parent-2':
2136 parent = p2
2241 parent = p2
2137 if p2 == nullid:
2242 if p2 == nullid:
2138 parent = p1
2243 parent = p1
2139 baserev = orig.rev(parent)
2244 baserev = orig.rev(parent)
2140 cachedelta = (baserev, orig.revdiff(parent, rev))
2245 cachedelta = (baserev, orig.revdiff(parent, rev))
2141 elif source == b'parent-smallest':
2246 elif source == b'parent-smallest':
2142 p1diff = orig.revdiff(p1, rev)
2247 p1diff = orig.revdiff(p1, rev)
2143 parent = p1
2248 parent = p1
2144 diff = p1diff
2249 diff = p1diff
2145 if p2 != nullid:
2250 if p2 != nullid:
2146 p2diff = orig.revdiff(p2, rev)
2251 p2diff = orig.revdiff(p2, rev)
2147 if len(p1diff) > len(p2diff):
2252 if len(p1diff) > len(p2diff):
2148 parent = p2
2253 parent = p2
2149 diff = p2diff
2254 diff = p2diff
2150 baserev = orig.rev(parent)
2255 baserev = orig.rev(parent)
2151 cachedelta = (baserev, diff)
2256 cachedelta = (baserev, diff)
2152 elif source == b'storage':
2257 elif source == b'storage':
2153 baserev = orig.deltaparent(rev)
2258 baserev = orig.deltaparent(rev)
2154 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2259 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2155
2260
2156 return ((text, tr, linkrev, p1, p2),
2261 return ((text, tr, linkrev, p1, p2),
2157 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2262 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2158
2263
2159 @contextlib.contextmanager
2264 @contextlib.contextmanager
2160 def _temprevlog(ui, orig, truncaterev):
2265 def _temprevlog(ui, orig, truncaterev):
2161 from mercurial import vfs as vfsmod
2266 from mercurial import vfs as vfsmod
2162
2267
2163 if orig._inline:
2268 if orig._inline:
2164 raise error.Abort('not supporting inline revlog (yet)')
2269 raise error.Abort('not supporting inline revlog (yet)')
2165
2270
2166 origindexpath = orig.opener.join(orig.indexfile)
2271 origindexpath = orig.opener.join(orig.indexfile)
2167 origdatapath = orig.opener.join(orig.datafile)
2272 origdatapath = orig.opener.join(orig.datafile)
2168 indexname = 'revlog.i'
2273 indexname = 'revlog.i'
2169 dataname = 'revlog.d'
2274 dataname = 'revlog.d'
2170
2275
2171 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2276 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2172 try:
2277 try:
2173 # copy the data file in a temporary directory
2278 # copy the data file in a temporary directory
2174 ui.debug('copying data in %s\n' % tmpdir)
2279 ui.debug('copying data in %s\n' % tmpdir)
2175 destindexpath = os.path.join(tmpdir, 'revlog.i')
2280 destindexpath = os.path.join(tmpdir, 'revlog.i')
2176 destdatapath = os.path.join(tmpdir, 'revlog.d')
2281 destdatapath = os.path.join(tmpdir, 'revlog.d')
2177 shutil.copyfile(origindexpath, destindexpath)
2282 shutil.copyfile(origindexpath, destindexpath)
2178 shutil.copyfile(origdatapath, destdatapath)
2283 shutil.copyfile(origdatapath, destdatapath)
2179
2284
2180 # remove the data we want to add again
2285 # remove the data we want to add again
2181 ui.debug('truncating data to be rewritten\n')
2286 ui.debug('truncating data to be rewritten\n')
2182 with open(destindexpath, 'ab') as index:
2287 with open(destindexpath, 'ab') as index:
2183 index.seek(0)
2288 index.seek(0)
2184 index.truncate(truncaterev * orig._io.size)
2289 index.truncate(truncaterev * orig._io.size)
2185 with open(destdatapath, 'ab') as data:
2290 with open(destdatapath, 'ab') as data:
2186 data.seek(0)
2291 data.seek(0)
2187 data.truncate(orig.start(truncaterev))
2292 data.truncate(orig.start(truncaterev))
2188
2293
2189 # instantiate a new revlog from the temporary copy
2294 # instantiate a new revlog from the temporary copy
2190 ui.debug('truncating adding to be rewritten\n')
2295 ui.debug('truncating adding to be rewritten\n')
2191 vfs = vfsmod.vfs(tmpdir)
2296 vfs = vfsmod.vfs(tmpdir)
2192 vfs.options = getattr(orig.opener, 'options', None)
2297 vfs.options = getattr(orig.opener, 'options', None)
2193
2298
2194 dest = revlog.revlog(vfs,
2299 dest = revlog.revlog(vfs,
2195 indexfile=indexname,
2300 indexfile=indexname,
2196 datafile=dataname)
2301 datafile=dataname)
2197 if dest._inline:
2302 if dest._inline:
2198 raise error.Abort('not supporting inline revlog (yet)')
2303 raise error.Abort('not supporting inline revlog (yet)')
2199 # make sure internals are initialized
2304 # make sure internals are initialized
2200 dest.revision(len(dest) - 1)
2305 dest.revision(len(dest) - 1)
2201 yield dest
2306 yield dest
2202 del dest, vfs
2307 del dest, vfs
2203 finally:
2308 finally:
2204 shutil.rmtree(tmpdir, True)
2309 shutil.rmtree(tmpdir, True)
2205
2310
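# The truncation in _temprevlog relies on fixed-size index entries: cutting
# the index copy at `truncaterev * entry_size` bytes and the data copy at the
# start offset of `truncaterev` drops every revision from that point on. A
# sketch of the same arithmetic, assuming `orig` is a non-inline revlog:
def _truncation_offsets(orig, truncaterev):
    indexbytes = truncaterev * orig._io.size
    databytes = orig.start(truncaterev)
    return indexbytes, databytes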
2206 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2311 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2207 [(b'e', b'engines', b'', b'compression engines to use'),
2312 [(b'e', b'engines', b'', b'compression engines to use'),
2208 (b's', b'startrev', 0, b'revision to start at')],
2313 (b's', b'startrev', 0, b'revision to start at')],
2209 b'-c|-m|FILE')
2314 b'-c|-m|FILE')
2210 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2315 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2211 """Benchmark operations on revlog chunks.
2316 """Benchmark operations on revlog chunks.
2212
2317
2213 Logically, each revlog is a collection of fulltext revisions. However,
2318 Logically, each revlog is a collection of fulltext revisions. However,
2214 stored within each revlog are "chunks" of possibly compressed data. This
2319 stored within each revlog are "chunks" of possibly compressed data. This
2215 data needs to be read and decompressed or compressed and written.
2320 data needs to be read and decompressed or compressed and written.
2216
2321
2217 This command measures the time it takes to read+decompress and recompress
2322 This command measures the time it takes to read+decompress and recompress
2218 chunks in a revlog. It effectively isolates I/O and compression performance.
2323 chunks in a revlog. It effectively isolates I/O and compression performance.
2219 For measurements of higher-level operations like resolving revisions,
2324 For measurements of higher-level operations like resolving revisions,
2220 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2325 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2221 """
2326 """
2222 opts = _byteskwargs(opts)
2327 opts = _byteskwargs(opts)
2223
2328
2224 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2329 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2225
2330
2226 # _chunkraw was renamed to _getsegmentforrevs.
2331 # _chunkraw was renamed to _getsegmentforrevs.
2227 try:
2332 try:
2228 segmentforrevs = rl._getsegmentforrevs
2333 segmentforrevs = rl._getsegmentforrevs
2229 except AttributeError:
2334 except AttributeError:
2230 segmentforrevs = rl._chunkraw
2335 segmentforrevs = rl._chunkraw
2231
2336
2232 # Verify engines argument.
2337 # Verify engines argument.
2233 if engines:
2338 if engines:
2234 engines = set(e.strip() for e in engines.split(b','))
2339 engines = set(e.strip() for e in engines.split(b','))
2235 for engine in engines:
2340 for engine in engines:
2236 try:
2341 try:
2237 util.compressionengines[engine]
2342 util.compressionengines[engine]
2238 except KeyError:
2343 except KeyError:
2239 raise error.Abort(b'unknown compression engine: %s' % engine)
2344 raise error.Abort(b'unknown compression engine: %s' % engine)
2240 else:
2345 else:
2241 engines = []
2346 engines = []
2242 for e in util.compengines:
2347 for e in util.compengines:
2243 engine = util.compengines[e]
2348 engine = util.compengines[e]
2244 try:
2349 try:
2245 if engine.available():
2350 if engine.available():
2246 engine.revlogcompressor().compress(b'dummy')
2351 engine.revlogcompressor().compress(b'dummy')
2247 engines.append(e)
2352 engines.append(e)
2248 except NotImplementedError:
2353 except NotImplementedError:
2249 pass
2354 pass
2250
2355
2251 revs = list(rl.revs(startrev, len(rl) - 1))
2356 revs = list(rl.revs(startrev, len(rl) - 1))
2252
2357
2253 def rlfh(rl):
2358 def rlfh(rl):
2254 if rl._inline:
2359 if rl._inline:
2255 return getsvfs(repo)(rl.indexfile)
2360 return getsvfs(repo)(rl.indexfile)
2256 else:
2361 else:
2257 return getsvfs(repo)(rl.datafile)
2362 return getsvfs(repo)(rl.datafile)
2258
2363
2259 def doread():
2364 def doread():
2260 rl.clearcaches()
2365 rl.clearcaches()
2261 for rev in revs:
2366 for rev in revs:
2262 segmentforrevs(rev, rev)
2367 segmentforrevs(rev, rev)
2263
2368
2264 def doreadcachedfh():
2369 def doreadcachedfh():
2265 rl.clearcaches()
2370 rl.clearcaches()
2266 fh = rlfh(rl)
2371 fh = rlfh(rl)
2267 for rev in revs:
2372 for rev in revs:
2268 segmentforrevs(rev, rev, df=fh)
2373 segmentforrevs(rev, rev, df=fh)
2269
2374
2270 def doreadbatch():
2375 def doreadbatch():
2271 rl.clearcaches()
2376 rl.clearcaches()
2272 segmentforrevs(revs[0], revs[-1])
2377 segmentforrevs(revs[0], revs[-1])
2273
2378
2274 def doreadbatchcachedfh():
2379 def doreadbatchcachedfh():
2275 rl.clearcaches()
2380 rl.clearcaches()
2276 fh = rlfh(rl)
2381 fh = rlfh(rl)
2277 segmentforrevs(revs[0], revs[-1], df=fh)
2382 segmentforrevs(revs[0], revs[-1], df=fh)
2278
2383
2279 def dochunk():
2384 def dochunk():
2280 rl.clearcaches()
2385 rl.clearcaches()
2281 fh = rlfh(rl)
2386 fh = rlfh(rl)
2282 for rev in revs:
2387 for rev in revs:
2283 rl._chunk(rev, df=fh)
2388 rl._chunk(rev, df=fh)
2284
2389
2285 chunks = [None]
2390 chunks = [None]
2286
2391
2287 def dochunkbatch():
2392 def dochunkbatch():
2288 rl.clearcaches()
2393 rl.clearcaches()
2289 fh = rlfh(rl)
2394 fh = rlfh(rl)
2290 # Save chunks as a side-effect.
2395 # Save chunks as a side-effect.
2291 chunks[0] = rl._chunks(revs, df=fh)
2396 chunks[0] = rl._chunks(revs, df=fh)
2292
2397
2293 def docompress(compressor):
2398 def docompress(compressor):
2294 rl.clearcaches()
2399 rl.clearcaches()
2295
2400
2296 try:
2401 try:
2297 # Swap in the requested compression engine.
2402 # Swap in the requested compression engine.
2298 oldcompressor = rl._compressor
2403 oldcompressor = rl._compressor
2299 rl._compressor = compressor
2404 rl._compressor = compressor
2300 for chunk in chunks[0]:
2405 for chunk in chunks[0]:
2301 rl.compress(chunk)
2406 rl.compress(chunk)
2302 finally:
2407 finally:
2303 rl._compressor = oldcompressor
2408 rl._compressor = oldcompressor
2304
2409
2305 benches = [
2410 benches = [
2306 (lambda: doread(), b'read'),
2411 (lambda: doread(), b'read'),
2307 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2412 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2308 (lambda: doreadbatch(), b'read batch'),
2413 (lambda: doreadbatch(), b'read batch'),
2309 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2414 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2310 (lambda: dochunk(), b'chunk'),
2415 (lambda: dochunk(), b'chunk'),
2311 (lambda: dochunkbatch(), b'chunk batch'),
2416 (lambda: dochunkbatch(), b'chunk batch'),
2312 ]
2417 ]
2313
2418
2314 for engine in sorted(engines):
2419 for engine in sorted(engines):
2315 compressor = util.compengines[engine].revlogcompressor()
2420 compressor = util.compengines[engine].revlogcompressor()
2316 benches.append((functools.partial(docompress, compressor),
2421 benches.append((functools.partial(docompress, compressor),
2317 b'compress w/ %s' % engine))
2422 b'compress w/ %s' % engine))
2318
2423
2319 for fn, title in benches:
2424 for fn, title in benches:
2320 timer, fm = gettimer(ui, opts)
2425 timer, fm = gettimer(ui, opts)
2321 timer(fn, title=title)
2426 timer(fn, title=title)
2322 fm.end()
2427 fm.end()
2323
2428
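# A sketch of the engine discovery used in perfrevlogchunks above: an engine
# is benchmarked only if it reports itself available and its revlog
# compressor accepts data, which filters out engines compiled without
# support (the b'dummy' payload is just a probe).
def _usable_engines():
    usable = []
    for name in util.compengines:
        engine = util.compengines[name]
        try:
            if engine.available():
                engine.revlogcompressor().compress(b'dummy')
                usable.append(name)
        except NotImplementedError:
            pass
    return usable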
2324 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2429 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2325 [(b'', b'cache', False, b'use caches instead of clearing')],
2430 [(b'', b'cache', False, b'use caches instead of clearing')],
2326 b'-c|-m|FILE REV')
2431 b'-c|-m|FILE REV')
2327 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2432 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2328 """Benchmark obtaining a revlog revision.
2433 """Benchmark obtaining a revlog revision.
2329
2434
2330 Obtaining a revlog revision consists of roughly the following steps:
2435 Obtaining a revlog revision consists of roughly the following steps:
2331
2436
2332 1. Compute the delta chain
2437 1. Compute the delta chain
2333 2. Slice the delta chain if applicable
2438 2. Slice the delta chain if applicable
2334 3. Obtain the raw chunks for that delta chain
2439 3. Obtain the raw chunks for that delta chain
2335 4. Decompress each raw chunk
2440 4. Decompress each raw chunk
2336 5. Apply binary patches to obtain fulltext
2441 5. Apply binary patches to obtain fulltext
2337 6. Verify hash of fulltext
2442 6. Verify hash of fulltext
2338
2443
2339 This command measures the time spent in each of these phases.
2444 This command measures the time spent in each of these phases.
2340 """
2445 """
2341 opts = _byteskwargs(opts)
2446 opts = _byteskwargs(opts)
2342
2447
2343 if opts.get(b'changelog') or opts.get(b'manifest'):
2448 if opts.get(b'changelog') or opts.get(b'manifest'):
2344 file_, rev = None, file_
2449 file_, rev = None, file_
2345 elif rev is None:
2450 elif rev is None:
2346 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2451 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2347
2452
2348 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2453 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2349
2454
2350 # _chunkraw was renamed to _getsegmentforrevs.
2455 # _chunkraw was renamed to _getsegmentforrevs.
2351 try:
2456 try:
2352 segmentforrevs = r._getsegmentforrevs
2457 segmentforrevs = r._getsegmentforrevs
2353 except AttributeError:
2458 except AttributeError:
2354 segmentforrevs = r._chunkraw
2459 segmentforrevs = r._chunkraw
2355
2460
2356 node = r.lookup(rev)
2461 node = r.lookup(rev)
2357 rev = r.rev(node)
2462 rev = r.rev(node)
2358
2463
2359 def getrawchunks(data, chain):
2464 def getrawchunks(data, chain):
2360 start = r.start
2465 start = r.start
2361 length = r.length
2466 length = r.length
2362 inline = r._inline
2467 inline = r._inline
2363 iosize = r._io.size
2468 iosize = r._io.size
2364 buffer = util.buffer
2469 buffer = util.buffer
2365
2470
2366 chunks = []
2471 chunks = []
2367 ladd = chunks.append
2472 ladd = chunks.append
2368 for idx, item in enumerate(chain):
2473 for idx, item in enumerate(chain):
2369 offset = start(item[0])
2474 offset = start(item[0])
2370 bits = data[idx]
2475 bits = data[idx]
2371 for rev in item:
2476 for rev in item:
2372 chunkstart = start(rev)
2477 chunkstart = start(rev)
2373 if inline:
2478 if inline:
2374 chunkstart += (rev + 1) * iosize
2479 chunkstart += (rev + 1) * iosize
2375 chunklength = length(rev)
2480 chunklength = length(rev)
2376 ladd(buffer(bits, chunkstart - offset, chunklength))
2481 ladd(buffer(bits, chunkstart - offset, chunklength))
2377
2482
2378 return chunks
2483 return chunks
2379
2484
2380 def dodeltachain(rev):
2485 def dodeltachain(rev):
2381 if not cache:
2486 if not cache:
2382 r.clearcaches()
2487 r.clearcaches()
2383 r._deltachain(rev)
2488 r._deltachain(rev)
2384
2489
2385 def doread(chain):
2490 def doread(chain):
2386 if not cache:
2491 if not cache:
2387 r.clearcaches()
2492 r.clearcaches()
2388 for item in slicedchain:
2493 for item in slicedchain:
2389 segmentforrevs(item[0], item[-1])
2494 segmentforrevs(item[0], item[-1])
2390
2495
2391 def doslice(r, chain, size):
2496 def doslice(r, chain, size):
2392 for s in slicechunk(r, chain, targetsize=size):
2497 for s in slicechunk(r, chain, targetsize=size):
2393 pass
2498 pass
2394
2499
2395 def dorawchunks(data, chain):
2500 def dorawchunks(data, chain):
2396 if not cache:
2501 if not cache:
2397 r.clearcaches()
2502 r.clearcaches()
2398 getrawchunks(data, chain)
2503 getrawchunks(data, chain)
2399
2504
2400 def dodecompress(chunks):
2505 def dodecompress(chunks):
2401 decomp = r.decompress
2506 decomp = r.decompress
2402 for chunk in chunks:
2507 for chunk in chunks:
2403 decomp(chunk)
2508 decomp(chunk)
2404
2509
2405 def dopatch(text, bins):
2510 def dopatch(text, bins):
2406 if not cache:
2511 if not cache:
2407 r.clearcaches()
2512 r.clearcaches()
2408 mdiff.patches(text, bins)
2513 mdiff.patches(text, bins)
2409
2514
2410 def dohash(text):
2515 def dohash(text):
2411 if not cache:
2516 if not cache:
2412 r.clearcaches()
2517 r.clearcaches()
2413 r.checkhash(text, node, rev=rev)
2518 r.checkhash(text, node, rev=rev)
2414
2519
2415 def dorevision():
2520 def dorevision():
2416 if not cache:
2521 if not cache:
2417 r.clearcaches()
2522 r.clearcaches()
2418 r.revision(node)
2523 r.revision(node)
2419
2524
2420 try:
2525 try:
2421 from mercurial.revlogutils.deltas import slicechunk
2526 from mercurial.revlogutils.deltas import slicechunk
2422 except ImportError:
2527 except ImportError:
2423 slicechunk = getattr(revlog, '_slicechunk', None)
2528 slicechunk = getattr(revlog, '_slicechunk', None)
2424
2529
2425 size = r.length(rev)
2530 size = r.length(rev)
2426 chain = r._deltachain(rev)[0]
2531 chain = r._deltachain(rev)[0]
2427 if not getattr(r, '_withsparseread', False):
2532 if not getattr(r, '_withsparseread', False):
2428 slicedchain = (chain,)
2533 slicedchain = (chain,)
2429 else:
2534 else:
2430 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2535 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2431 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2536 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2432 rawchunks = getrawchunks(data, slicedchain)
2537 rawchunks = getrawchunks(data, slicedchain)
2433 bins = r._chunks(chain)
2538 bins = r._chunks(chain)
2434 text = bytes(bins[0])
2539 text = bytes(bins[0])
2435 bins = bins[1:]
2540 bins = bins[1:]
2436 text = mdiff.patches(text, bins)
2541 text = mdiff.patches(text, bins)
2437
2542
2438 benches = [
2543 benches = [
2439 (lambda: dorevision(), b'full'),
2544 (lambda: dorevision(), b'full'),
2440 (lambda: dodeltachain(rev), b'deltachain'),
2545 (lambda: dodeltachain(rev), b'deltachain'),
2441 (lambda: doread(chain), b'read'),
2546 (lambda: doread(chain), b'read'),
2442 ]
2547 ]
2443
2548
2444 if getattr(r, '_withsparseread', False):
2549 if getattr(r, '_withsparseread', False):
2445 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2550 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2446 benches.append(slicing)
2551 benches.append(slicing)
2447
2552
2448 benches.extend([
2553 benches.extend([
2449 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2554 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2450 (lambda: dodecompress(rawchunks), b'decompress'),
2555 (lambda: dodecompress(rawchunks), b'decompress'),
2451 (lambda: dopatch(text, bins), b'patch'),
2556 (lambda: dopatch(text, bins), b'patch'),
2452 (lambda: dohash(text), b'hash'),
2557 (lambda: dohash(text), b'hash'),
2453 ])
2558 ])
2454
2559
2455 timer, fm = gettimer(ui, opts)
2560 timer, fm = gettimer(ui, opts)
2456 for fn, title in benches:
2561 for fn, title in benches:
2457 timer(fn, title=title)
2562 timer(fn, title=title)
2458 fm.end()
2563 fm.end()
2459
2564
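# A condensed sketch of the phases benchmarked in perfrevlogrevision above,
# assuming `r` is an open revlog and `rev` a valid revision number; error
# handling and cache clearing are omitted.
def _rebuild_revision(r, rev):
    node = r.node(rev)
    chain = r._deltachain(rev)[0]         # 1. compute the delta chain
    bins = r._chunks(chain)               # 2-4. slice, read and decompress chunks
    text = bytes(bins[0])
    text = mdiff.patches(text, bins[1:])  # 5. apply binary patches
    r.checkhash(text, node, rev=rev)      # 6. verify the fulltext hash
    return text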
2460 @command(b'perfrevset',
2565 @command(b'perfrevset',
2461 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2566 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2462 (b'', b'contexts', False, b'obtain changectx for each revision')]
2567 (b'', b'contexts', False, b'obtain changectx for each revision')]
2463 + formatteropts, b"REVSET")
2568 + formatteropts, b"REVSET")
2464 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2569 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2465 """benchmark the execution time of a revset
2570 """benchmark the execution time of a revset
2466
2571
2467 Use the --clear option if you need to evaluate the impact of rebuilding the
2572 Use the --clear option if you need to evaluate the impact of rebuilding the
2468 volatile revision set caches on revset execution. The volatile caches hold
2573 volatile revision set caches on revset execution. The volatile caches hold
2469 filtering- and obsolescence-related data."""
2574 filtering- and obsolescence-related data."""
2470 opts = _byteskwargs(opts)
2575 opts = _byteskwargs(opts)
2471
2576
2472 timer, fm = gettimer(ui, opts)
2577 timer, fm = gettimer(ui, opts)
2473 def d():
2578 def d():
2474 if clear:
2579 if clear:
2475 repo.invalidatevolatilesets()
2580 repo.invalidatevolatilesets()
2476 if contexts:
2581 if contexts:
2477 for ctx in repo.set(expr): pass
2582 for ctx in repo.set(expr): pass
2478 else:
2583 else:
2479 for r in repo.revs(expr): pass
2584 for r in repo.revs(expr): pass
2480 timer(d)
2585 timer(d)
2481 fm.end()
2586 fm.end()
2482
2587
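# Example invocations (revsets are illustrative):
#   $ hg perfrevset 'draft()'
#   $ hg perfrevset --contexts --clear 'heads(all())'
#
# The two evaluation modes timed above differ only in what is materialized per
# revision; a sketch, assuming `repo` is a repository and `expr` a revset:
def _evaluate_revset(repo, expr, contexts=False):
    if contexts:
        return [ctx for ctx in repo.set(expr)]  # full changectx objects
    return [r for r in repo.revs(expr)]         # plain revision numbers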
2483 @command(b'perfvolatilesets',
2588 @command(b'perfvolatilesets',
2484 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2589 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2485 ] + formatteropts)
2590 ] + formatteropts)
2486 def perfvolatilesets(ui, repo, *names, **opts):
2591 def perfvolatilesets(ui, repo, *names, **opts):
2487 """benchmark the computation of various volatile set
2592 """benchmark the computation of various volatile set
2488
2593
2489 Volatile set computes element related to filtering and obsolescence."""
2594 Volatile set computes element related to filtering and obsolescence."""
2490 opts = _byteskwargs(opts)
2595 opts = _byteskwargs(opts)
2491 timer, fm = gettimer(ui, opts)
2596 timer, fm = gettimer(ui, opts)
2492 repo = repo.unfiltered()
2597 repo = repo.unfiltered()
2493
2598
2494 def getobs(name):
2599 def getobs(name):
2495 def d():
2600 def d():
2496 repo.invalidatevolatilesets()
2601 repo.invalidatevolatilesets()
2497 if opts[b'clear_obsstore']:
2602 if opts[b'clear_obsstore']:
2498 clearfilecache(repo, b'obsstore')
2603 clearfilecache(repo, b'obsstore')
2499 obsolete.getrevs(repo, name)
2604 obsolete.getrevs(repo, name)
2500 return d
2605 return d
2501
2606
2502 allobs = sorted(obsolete.cachefuncs)
2607 allobs = sorted(obsolete.cachefuncs)
2503 if names:
2608 if names:
2504 allobs = [n for n in allobs if n in names]
2609 allobs = [n for n in allobs if n in names]
2505
2610
2506 for name in allobs:
2611 for name in allobs:
2507 timer(getobs(name), title=name)
2612 timer(getobs(name), title=name)
2508
2613
2509 def getfiltered(name):
2614 def getfiltered(name):
2510 def d():
2615 def d():
2511 repo.invalidatevolatilesets()
2616 repo.invalidatevolatilesets()
2512 if opts[b'clear_obsstore']:
2617 if opts[b'clear_obsstore']:
2513 clearfilecache(repo, b'obsstore')
2618 clearfilecache(repo, b'obsstore')
2514 repoview.filterrevs(repo, name)
2619 repoview.filterrevs(repo, name)
2515 return d
2620 return d
2516
2621
2517 allfilter = sorted(repoview.filtertable)
2622 allfilter = sorted(repoview.filtertable)
2518 if names:
2623 if names:
2519 allfilter = [n for n in allfilter if n in names]
2624 allfilter = [n for n in allfilter if n in names]
2520
2625
2521 for name in allfilter:
2626 for name in allfilter:
2522 timer(getfiltered(name), title=name)
2627 timer(getfiltered(name), title=name)
2523 fm.end()
2628 fm.end()
2524
2629
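# A sketch of what a single timed call in perfvolatilesets covers, assuming
# `repo` is unfiltered and `name` is either an obsolescence set name (from
# obsolete.cachefuncs) or a repoview filter name (from repoview.filtertable):
def _compute_volatile(repo, name):
    repo.invalidatevolatilesets()
    if name in obsolete.cachefuncs:
        return obsolete.getrevs(repo, name)
    return repoview.filterrevs(repo, name)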
2525 @command(b'perfbranchmap',
2630 @command(b'perfbranchmap',
2526 [(b'f', b'full', False,
2631 [(b'f', b'full', False,
2527 b'Includes build time of subset'),
2632 b'Includes build time of subset'),
2528 (b'', b'clear-revbranch', False,
2633 (b'', b'clear-revbranch', False,
2529 b'purge the revbranch cache between computation'),
2634 b'purge the revbranch cache between computation'),
2530 ] + formatteropts)
2635 ] + formatteropts)
2531 def perfbranchmap(ui, repo, *filternames, **opts):
2636 def perfbranchmap(ui, repo, *filternames, **opts):
2532 """benchmark the update of a branchmap
2637 """benchmark the update of a branchmap
2533
2638
2534 This benchmarks the full repo.branchmap() call with read and write disabled
2639 This benchmarks the full repo.branchmap() call with read and write disabled
2535 """
2640 """
2536 opts = _byteskwargs(opts)
2641 opts = _byteskwargs(opts)
2537 full = opts.get(b"full", False)
2642 full = opts.get(b"full", False)
2538 clear_revbranch = opts.get(b"clear_revbranch", False)
2643 clear_revbranch = opts.get(b"clear_revbranch", False)
2539 timer, fm = gettimer(ui, opts)
2644 timer, fm = gettimer(ui, opts)
2540 def getbranchmap(filtername):
2645 def getbranchmap(filtername):
2541 """generate a benchmark function for the filtername"""
2646 """generate a benchmark function for the filtername"""
2542 if filtername is None:
2647 if filtername is None:
2543 view = repo
2648 view = repo
2544 else:
2649 else:
2545 view = repo.filtered(filtername)
2650 view = repo.filtered(filtername)
2546 if util.safehasattr(view._branchcaches, '_per_filter'):
2651 if util.safehasattr(view._branchcaches, '_per_filter'):
2547 filtered = view._branchcaches._per_filter
2652 filtered = view._branchcaches._per_filter
2548 else:
2653 else:
2549 # older versions
2654 # older versions
2550 filtered = view._branchcaches
2655 filtered = view._branchcaches
2551 def d():
2656 def d():
2552 if clear_revbranch:
2657 if clear_revbranch:
2553 repo.revbranchcache()._clear()
2658 repo.revbranchcache()._clear()
2554 if full:
2659 if full:
2555 view._branchcaches.clear()
2660 view._branchcaches.clear()
2556 else:
2661 else:
2557 filtered.pop(filtername, None)
2662 filtered.pop(filtername, None)
2558 view.branchmap()
2663 view.branchmap()
2559 return d
2664 return d
2560 # add filters ordered from smaller subsets to bigger subsets
2665 # add filters ordered from smaller subsets to bigger subsets
2561 possiblefilters = set(repoview.filtertable)
2666 possiblefilters = set(repoview.filtertable)
2562 if filternames:
2667 if filternames:
2563 possiblefilters &= set(filternames)
2668 possiblefilters &= set(filternames)
2564 subsettable = getbranchmapsubsettable()
2669 subsettable = getbranchmapsubsettable()
2565 allfilters = []
2670 allfilters = []
2566 while possiblefilters:
2671 while possiblefilters:
2567 for name in possiblefilters:
2672 for name in possiblefilters:
2568 subset = subsettable.get(name)
2673 subset = subsettable.get(name)
2569 if subset not in possiblefilters:
2674 if subset not in possiblefilters:
2570 break
2675 break
2571 else:
2676 else:
2572 assert False, b'subset cycle %s!' % possiblefilters
2677 assert False, b'subset cycle %s!' % possiblefilters
2573 allfilters.append(name)
2678 allfilters.append(name)
2574 possiblefilters.remove(name)
2679 possiblefilters.remove(name)
2575
2680
2576 # warm the cache
2681 # warm the cache
2577 if not full:
2682 if not full:
2578 for name in allfilters:
2683 for name in allfilters:
2579 repo.filtered(name).branchmap()
2684 repo.filtered(name).branchmap()
2580 if not filternames or b'unfiltered' in filternames:
2685 if not filternames or b'unfiltered' in filternames:
2581 # add unfiltered
2686 # add unfiltered
2582 allfilters.append(None)
2687 allfilters.append(None)
2583
2688
2584 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2689 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2585 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2690 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2586 branchcacheread.set(classmethod(lambda *args: None))
2691 branchcacheread.set(classmethod(lambda *args: None))
2587 else:
2692 else:
2588 # older versions
2693 # older versions
2589 branchcacheread = safeattrsetter(branchmap, b'read')
2694 branchcacheread = safeattrsetter(branchmap, b'read')
2590 branchcacheread.set(lambda *args: None)
2695 branchcacheread.set(lambda *args: None)
2591 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2696 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2592 branchcachewrite.set(lambda *args: None)
2697 branchcachewrite.set(lambda *args: None)
2593 try:
2698 try:
2594 for name in allfilters:
2699 for name in allfilters:
2595 printname = name
2700 printname = name
2596 if name is None:
2701 if name is None:
2597 printname = b'unfiltered'
2702 printname = b'unfiltered'
2598 timer(getbranchmap(name), title=str(printname))
2703 timer(getbranchmap(name), title=str(printname))
2599 finally:
2704 finally:
2600 branchcacheread.restore()
2705 branchcacheread.restore()
2601 branchcachewrite.restore()
2706 branchcachewrite.restore()
2602 fm.end()
2707 fm.end()
2603
2708
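# A sketch of one branchmap rebuild timed in perfbranchmap above, assuming the
# per-filter cache eviction done in d() has already happened; `filtername` is
# a repoview filter name, or None for the unfiltered view.
def _rebuild_branchmap(repo, filtername):
    view = repo if filtername is None else repo.filtered(filtername)
    return view.branchmap()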
2604 @command(b'perfbranchmapupdate', [
2709 @command(b'perfbranchmapupdate', [
2605 (b'', b'base', [], b'subset of revision to start from'),
2710 (b'', b'base', [], b'subset of revision to start from'),
2606 (b'', b'target', [], b'subset of revision to end with'),
2711 (b'', b'target', [], b'subset of revision to end with'),
2607 (b'', b'clear-caches', False, b'clear caches between each run')
2712 (b'', b'clear-caches', False, b'clear caches between each run')
2608 ] + formatteropts)
2713 ] + formatteropts)
2609 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2714 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2610 """benchmark branchmap update from for <base> revs to <target> revs
2715 """benchmark branchmap update from for <base> revs to <target> revs
2611
2716
2612 If `--clear-caches` is passed, the following items will be reset before
2717 If `--clear-caches` is passed, the following items will be reset before
2613 each update:
2718 each update:
2614 * the changelog instance and associated indexes
2719 * the changelog instance and associated indexes
2615 * the rev-branch-cache instance
2720 * the rev-branch-cache instance
2616
2721
2617 Examples:
2722 Examples:
2618
2723
2619 # update for the one last revision
2724 # update for the one last revision
2620 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2725 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2621
2726
2622 # update for a change coming with a new branch
2727 # update for a change coming with a new branch
2623 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2728 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2624 """
2729 """
2625 from mercurial import branchmap
2730 from mercurial import branchmap
2626 from mercurial import repoview
2731 from mercurial import repoview
2627 opts = _byteskwargs(opts)
2732 opts = _byteskwargs(opts)
2628 timer, fm = gettimer(ui, opts)
2733 timer, fm = gettimer(ui, opts)
2629 clearcaches = opts[b'clear_caches']
2734 clearcaches = opts[b'clear_caches']
2630 unfi = repo.unfiltered()
2735 unfi = repo.unfiltered()
2631 x = [None] # used to pass data between closure
2736 x = [None] # used to pass data between closure
2632
2737
2633 # we use a `list` here to avoid possible side effect from smartset
2738 # we use a `list` here to avoid possible side effect from smartset
2634 baserevs = list(scmutil.revrange(repo, base))
2739 baserevs = list(scmutil.revrange(repo, base))
2635 targetrevs = list(scmutil.revrange(repo, target))
2740 targetrevs = list(scmutil.revrange(repo, target))
2636 if not baserevs:
2741 if not baserevs:
2637 raise error.Abort(b'no revisions selected for --base')
2742 raise error.Abort(b'no revisions selected for --base')
2638 if not targetrevs:
2743 if not targetrevs:
2639 raise error.Abort(b'no revisions selected for --target')
2744 raise error.Abort(b'no revisions selected for --target')
2640
2745
2641 # make sure the target branchmap also contains the one in the base
2746 # make sure the target branchmap also contains the one in the base
2642 targetrevs = list(set(baserevs) | set(targetrevs))
2747 targetrevs = list(set(baserevs) | set(targetrevs))
2643 targetrevs.sort()
2748 targetrevs.sort()
2644
2749
2645 cl = repo.changelog
2750 cl = repo.changelog
2646 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2751 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2647 allbaserevs.sort()
2752 allbaserevs.sort()
2648 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2753 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2649
2754
2650 newrevs = list(alltargetrevs.difference(allbaserevs))
2755 newrevs = list(alltargetrevs.difference(allbaserevs))
2651 newrevs.sort()
2756 newrevs.sort()
2652
2757
2653 allrevs = frozenset(unfi.changelog.revs())
2758 allrevs = frozenset(unfi.changelog.revs())
2654 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2759 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2655 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2760 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2656
2761
2657 def basefilter(repo, visibilityexceptions=None):
2762 def basefilter(repo, visibilityexceptions=None):
2658 return basefilterrevs
2763 return basefilterrevs
2659
2764
2660 def targetfilter(repo, visibilityexceptions=None):
2765 def targetfilter(repo, visibilityexceptions=None):
2661 return targetfilterrevs
2766 return targetfilterrevs
2662
2767
2663 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2768 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2664 ui.status(msg % (len(allbaserevs), len(newrevs)))
2769 ui.status(msg % (len(allbaserevs), len(newrevs)))
2665 if targetfilterrevs:
2770 if targetfilterrevs:
2666 msg = b'(%d revisions still filtered)\n'
2771 msg = b'(%d revisions still filtered)\n'
2667 ui.status(msg % len(targetfilterrevs))
2772 ui.status(msg % len(targetfilterrevs))
2668
2773
2669 try:
2774 try:
2670 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2775 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2671 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2776 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2672
2777
2673 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2778 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2674 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2779 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2675
2780
2676 # try to find an existing branchmap to reuse
2781 # try to find an existing branchmap to reuse
2677 subsettable = getbranchmapsubsettable()
2782 subsettable = getbranchmapsubsettable()
2678 candidatefilter = subsettable.get(None)
2783 candidatefilter = subsettable.get(None)
2679 while candidatefilter is not None:
2784 while candidatefilter is not None:
2680 candidatebm = repo.filtered(candidatefilter).branchmap()
2785 candidatebm = repo.filtered(candidatefilter).branchmap()
2681 if candidatebm.validfor(baserepo):
2786 if candidatebm.validfor(baserepo):
2682 filtered = repoview.filterrevs(repo, candidatefilter)
2787 filtered = repoview.filterrevs(repo, candidatefilter)
2683 missing = [r for r in allbaserevs if r in filtered]
2788 missing = [r for r in allbaserevs if r in filtered]
2684 base = candidatebm.copy()
2789 base = candidatebm.copy()
2685 base.update(baserepo, missing)
2790 base.update(baserepo, missing)
2686 break
2791 break
2687 candidatefilter = subsettable.get(candidatefilter)
2792 candidatefilter = subsettable.get(candidatefilter)
2688 else:
2793 else:
2689 # no suitable subset was found
2794 # no suitable subset was found
2690 base = branchmap.branchcache()
2795 base = branchmap.branchcache()
2691 base.update(baserepo, allbaserevs)
2796 base.update(baserepo, allbaserevs)
2692
2797
2693 def setup():
2798 def setup():
2694 x[0] = base.copy()
2799 x[0] = base.copy()
2695 if clearcaches:
2800 if clearcaches:
2696 unfi._revbranchcache = None
2801 unfi._revbranchcache = None
2697 clearchangelog(repo)
2802 clearchangelog(repo)
2698
2803
2699 def bench():
2804 def bench():
2700 x[0].update(targetrepo, newrevs)
2805 x[0].update(targetrepo, newrevs)
2701
2806
2702 timer(bench, setup=setup)
2807 timer(bench, setup=setup)
2703 fm.end()
2808 fm.end()
2704 finally:
2809 finally:
2705 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2810 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2706 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2811 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2707
2812
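The benchmark above works by registering temporary repoview filters so that the base and target views expose exactly the chosen revision sets, and by removing them again in the `finally` block. A minimal sketch of that register/filter/clean-up pattern, assuming it runs inside an extension or debug command with a `repo` object in scope (the filter name is hypothetical):

    from mercurial import repoview

    def hidenothing(repo, visibilityexceptions=None):
        # a filter function returns the frozenset of revision numbers to hide
        return frozenset()

    repoview.filtertable[b'__example_filter'] = hidenothing
    try:
        view = repo.filtered(b'__example_filter')
        view.branchmap()  # branchmap computed against the custom view
    finally:
        # always unregister temporary filters, as the finally block above does
        repoview.filtertable.pop(b'__example_filter', None)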
2708 @command(b'perfbranchmapload', [
2813 @command(b'perfbranchmapload', [
2709 (b'f', b'filter', b'', b'Specify repoview filter'),
2814 (b'f', b'filter', b'', b'Specify repoview filter'),
2710 (b'', b'list', False, b'List branchmap filter caches'),
2815 (b'', b'list', False, b'List branchmap filter caches'),
2711 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2816 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2712
2817
2713 ] + formatteropts)
2818 ] + formatteropts)
2714 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2819 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2715 """benchmark reading the branchmap"""
2820 """benchmark reading the branchmap"""
2716 opts = _byteskwargs(opts)
2821 opts = _byteskwargs(opts)
2717 clearrevlogs = opts[b'clear_revlogs']
2822 clearrevlogs = opts[b'clear_revlogs']
2718
2823
2719 if list:
2824 if list:
2720 for name, kind, st in repo.cachevfs.readdir(stat=True):
2825 for name, kind, st in repo.cachevfs.readdir(stat=True):
2721 if name.startswith(b'branch2'):
2826 if name.startswith(b'branch2'):
2722 filtername = name.partition(b'-')[2] or b'unfiltered'
2827 filtername = name.partition(b'-')[2] or b'unfiltered'
2723 ui.status(b'%s - %s\n'
2828 ui.status(b'%s - %s\n'
2724 % (filtername, util.bytecount(st.st_size)))
2829 % (filtername, util.bytecount(st.st_size)))
2725 return
2830 return
2726 if not filter:
2831 if not filter:
2727 filter = None
2832 filter = None
2728 subsettable = getbranchmapsubsettable()
2833 subsettable = getbranchmapsubsettable()
2729 if filter is None:
2834 if filter is None:
2730 repo = repo.unfiltered()
2835 repo = repo.unfiltered()
2731 else:
2836 else:
2732 repo = repoview.repoview(repo, filter)
2837 repo = repoview.repoview(repo, filter)
2733
2838
2734 repo.branchmap() # make sure we have a relevant, up to date branchmap
2839 repo.branchmap() # make sure we have a relevant, up to date branchmap
2735
2840
2736 try:
2841 try:
2737 fromfile = branchmap.branchcache.fromfile
2842 fromfile = branchmap.branchcache.fromfile
2738 except AttributeError:
2843 except AttributeError:
2739 # older versions
2844 # older versions
2740 fromfile = branchmap.read
2845 fromfile = branchmap.read
2741
2846
2742 currentfilter = filter
2847 currentfilter = filter
2743 # try once without timer, the filter may not be cached
2848 # try once without timer, the filter may not be cached
2744 while fromfile(repo) is None:
2849 while fromfile(repo) is None:
2745 currentfilter = subsettable.get(currentfilter)
2850 currentfilter = subsettable.get(currentfilter)
2746 if currentfilter is None:
2851 if currentfilter is None:
2747 raise error.Abort(b'No branchmap cached for %s repo'
2852 raise error.Abort(b'No branchmap cached for %s repo'
2748 % (filter or b'unfiltered'))
2853 % (filter or b'unfiltered'))
2749 repo = repo.filtered(currentfilter)
2854 repo = repo.filtered(currentfilter)
2750 timer, fm = gettimer(ui, opts)
2855 timer, fm = gettimer(ui, opts)
2751 def setup():
2856 def setup():
2752 if clearrevlogs:
2857 if clearrevlogs:
2753 clearchangelog(repo)
2858 clearchangelog(repo)
2754 def bench():
2859 def bench():
2755 fromfile(repo)
2860 fromfile(repo)
2756 timer(bench, setup=setup)
2861 timer(bench, setup=setup)
2757 fm.end()
2862 fm.end()
2758
2863
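For reference, the on-disk read timed by this command boils down to one call: `branchcache.fromfile()` (or `branchmap.read()` on older Mercurial) returns the parsed cache for a given repoview, or None when no `branch2*` file exists for it in the cache vfs. A minimal sketch, assuming a recent Mercurial and a `repo` object:

    from mercurial import branchmap

    cache = branchmap.branchcache.fromfile(repo.unfiltered())
    if cache is None:
        # nothing cached for this view; perfbranchmapload then falls back to
        # the broader filter levels returned by getbranchmapsubsettable()
        pass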
2759 @command(b'perfloadmarkers')
2864 @command(b'perfloadmarkers')
2760 def perfloadmarkers(ui, repo):
2865 def perfloadmarkers(ui, repo):
2761 """benchmark the time to parse the on-disk markers for a repo
2866 """benchmark the time to parse the on-disk markers for a repo
2762
2867
2763 Result is the number of markers in the repo."""
2868 Result is the number of markers in the repo."""
2764 timer, fm = gettimer(ui)
2869 timer, fm = gettimer(ui)
2765 svfs = getsvfs(repo)
2870 svfs = getsvfs(repo)
2766 timer(lambda: len(obsolete.obsstore(svfs)))
2871 timer(lambda: len(obsolete.obsstore(svfs)))
2767 fm.end()
2872 fm.end()
2768
2873
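The timed lambda above is the whole measurement: building an obsstore over the store vfs and taking its length forces the on-disk markers to be parsed. Outside the timer harness, and assuming `ui`, `repo` and the getsvfs() compatibility helper defined earlier in this file, the equivalent would be:

    from mercurial import obsolete

    svfs = getsvfs(repo)                      # store vfs, version-portable helper
    nbmarkers = len(obsolete.obsstore(svfs))  # parsing happens while taking len()
    ui.write(b'%d markers\n' % nbmarkers)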
2769 @command(b'perflrucachedict', formatteropts +
2874 @command(b'perflrucachedict', formatteropts +
2770 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2875 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2771 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2876 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2772 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2877 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2773 (b'', b'size', 4, b'size of cache'),
2878 (b'', b'size', 4, b'size of cache'),
2774 (b'', b'gets', 10000, b'number of key lookups'),
2879 (b'', b'gets', 10000, b'number of key lookups'),
2775 (b'', b'sets', 10000, b'number of key sets'),
2880 (b'', b'sets', 10000, b'number of key sets'),
2776 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2881 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2777 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2882 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2778 norepo=True)
2883 norepo=True)
2779 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2884 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2780 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2885 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2781 opts = _byteskwargs(opts)
2886 opts = _byteskwargs(opts)
2782
2887
2783 def doinit():
2888 def doinit():
2784 for i in _xrange(10000):
2889 for i in _xrange(10000):
2785 util.lrucachedict(size)
2890 util.lrucachedict(size)
2786
2891
2787 costrange = list(range(mincost, maxcost + 1))
2892 costrange = list(range(mincost, maxcost + 1))
2788
2893
2789 values = []
2894 values = []
2790 for i in _xrange(size):
2895 for i in _xrange(size):
2791 values.append(random.randint(0, _maxint))
2896 values.append(random.randint(0, _maxint))
2792
2897
2793 # Get mode fills the cache and tests raw lookup performance with no
2898 # Get mode fills the cache and tests raw lookup performance with no
2794 # eviction.
2899 # eviction.
2795 getseq = []
2900 getseq = []
2796 for i in _xrange(gets):
2901 for i in _xrange(gets):
2797 getseq.append(random.choice(values))
2902 getseq.append(random.choice(values))
2798
2903
2799 def dogets():
2904 def dogets():
2800 d = util.lrucachedict(size)
2905 d = util.lrucachedict(size)
2801 for v in values:
2906 for v in values:
2802 d[v] = v
2907 d[v] = v
2803 for key in getseq:
2908 for key in getseq:
2804 value = d[key]
2909 value = d[key]
2805 value # silence pyflakes warning
2910 value # silence pyflakes warning
2806
2911
2807 def dogetscost():
2912 def dogetscost():
2808 d = util.lrucachedict(size, maxcost=costlimit)
2913 d = util.lrucachedict(size, maxcost=costlimit)
2809 for i, v in enumerate(values):
2914 for i, v in enumerate(values):
2810 d.insert(v, v, cost=costs[i])
2915 d.insert(v, v, cost=costs[i])
2811 for key in getseq:
2916 for key in getseq:
2812 try:
2917 try:
2813 value = d[key]
2918 value = d[key]
2814 value # silence pyflakes warning
2919 value # silence pyflakes warning
2815 except KeyError:
2920 except KeyError:
2816 pass
2921 pass
2817
2922
2818 # Set mode tests insertion speed with cache eviction.
2923 # Set mode tests insertion speed with cache eviction.
2819 setseq = []
2924 setseq = []
2820 costs = []
2925 costs = []
2821 for i in _xrange(sets):
2926 for i in _xrange(sets):
2822 setseq.append(random.randint(0, _maxint))
2927 setseq.append(random.randint(0, _maxint))
2823 costs.append(random.choice(costrange))
2928 costs.append(random.choice(costrange))
2824
2929
2825 def doinserts():
2930 def doinserts():
2826 d = util.lrucachedict(size)
2931 d = util.lrucachedict(size)
2827 for v in setseq:
2932 for v in setseq:
2828 d.insert(v, v)
2933 d.insert(v, v)
2829
2934
2830 def doinsertscost():
2935 def doinsertscost():
2831 d = util.lrucachedict(size, maxcost=costlimit)
2936 d = util.lrucachedict(size, maxcost=costlimit)
2832 for i, v in enumerate(setseq):
2937 for i, v in enumerate(setseq):
2833 d.insert(v, v, cost=costs[i])
2938 d.insert(v, v, cost=costs[i])
2834
2939
2835 def dosets():
2940 def dosets():
2836 d = util.lrucachedict(size)
2941 d = util.lrucachedict(size)
2837 for v in setseq:
2942 for v in setseq:
2838 d[v] = v
2943 d[v] = v
2839
2944
2840 # Mixed mode randomly performs gets and sets with eviction.
2945 # Mixed mode randomly performs gets and sets with eviction.
2841 mixedops = []
2946 mixedops = []
2842 for i in _xrange(mixed):
2947 for i in _xrange(mixed):
2843 r = random.randint(0, 100)
2948 r = random.randint(0, 100)
2844 if r < mixedgetfreq:
2949 if r < mixedgetfreq:
2845 op = 0
2950 op = 0
2846 else:
2951 else:
2847 op = 1
2952 op = 1
2848
2953
2849 mixedops.append((op,
2954 mixedops.append((op,
2850 random.randint(0, size * 2),
2955 random.randint(0, size * 2),
2851 random.choice(costrange)))
2956 random.choice(costrange)))
2852
2957
2853 def domixed():
2958 def domixed():
2854 d = util.lrucachedict(size)
2959 d = util.lrucachedict(size)
2855
2960
2856 for op, v, cost in mixedops:
2961 for op, v, cost in mixedops:
2857 if op == 0:
2962 if op == 0:
2858 try:
2963 try:
2859 d[v]
2964 d[v]
2860 except KeyError:
2965 except KeyError:
2861 pass
2966 pass
2862 else:
2967 else:
2863 d[v] = v
2968 d[v] = v
2864
2969
2865 def domixedcost():
2970 def domixedcost():
2866 d = util.lrucachedict(size, maxcost=costlimit)
2971 d = util.lrucachedict(size, maxcost=costlimit)
2867
2972
2868 for op, v, cost in mixedops:
2973 for op, v, cost in mixedops:
2869 if op == 0:
2974 if op == 0:
2870 try:
2975 try:
2871 d[v]
2976 d[v]
2872 except KeyError:
2977 except KeyError:
2873 pass
2978 pass
2874 else:
2979 else:
2875 d.insert(v, v, cost=cost)
2980 d.insert(v, v, cost=cost)
2876
2981
2877 benches = [
2982 benches = [
2878 (doinit, b'init'),
2983 (doinit, b'init'),
2879 ]
2984 ]
2880
2985
2881 if costlimit:
2986 if costlimit:
2882 benches.extend([
2987 benches.extend([
2883 (dogetscost, b'gets w/ cost limit'),
2988 (dogetscost, b'gets w/ cost limit'),
2884 (doinsertscost, b'inserts w/ cost limit'),
2989 (doinsertscost, b'inserts w/ cost limit'),
2885 (domixedcost, b'mixed w/ cost limit'),
2990 (domixedcost, b'mixed w/ cost limit'),
2886 ])
2991 ])
2887 else:
2992 else:
2888 benches.extend([
2993 benches.extend([
2889 (dogets, b'gets'),
2994 (dogets, b'gets'),
2890 (doinserts, b'inserts'),
2995 (doinserts, b'inserts'),
2891 (dosets, b'sets'),
2996 (dosets, b'sets'),
2892 (domixed, b'mixed')
2997 (domixed, b'mixed')
2893 ])
2998 ])
2894
2999
2895 for fn, title in benches:
3000 for fn, title in benches:
2896 timer, fm = gettimer(ui, opts)
3001 timer, fm = gettimer(ui, opts)
2897 timer(fn, title=title)
3002 timer(fn, title=title)
2898 fm.end()
3003 fm.end()
2899
3004
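For context, the util.lrucachedict API exercised by these benchmarks is small: a capacity-bound mapping, optionally with per-item costs and a total cost ceiling. A minimal sketch of both variants, assuming it runs inside Mercurial's Python environment (keys and costs are arbitrary):

    from mercurial import util

    d = util.lrucachedict(4)               # plain LRU, at most 4 entries
    d[b'a'] = 1                            # __setitem__ inserts or refreshes
    value = d[b'a']                        # __getitem__ marks it most recently used

    c = util.lrucachedict(4, maxcost=100)  # cost-aware variant
    c.insert(b'b', 2, cost=60)             # each insert counts against maxcost
    try:
        c[b'missing']
    except KeyError:
        pass                               # absent or evicted keys raise KeyError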
2900 @command(b'perfwrite', formatteropts)
3005 @command(b'perfwrite', formatteropts)
2901 def perfwrite(ui, repo, **opts):
3006 def perfwrite(ui, repo, **opts):
2902 """microbenchmark ui.write
3007 """microbenchmark ui.write
2903 """
3008 """
2904 opts = _byteskwargs(opts)
3009 opts = _byteskwargs(opts)
2905
3010
2906 timer, fm = gettimer(ui, opts)
3011 timer, fm = gettimer(ui, opts)
2907 def write():
3012 def write():
2908 for i in range(100000):
3013 for i in range(100000):
2909 ui.write((b'Testing write performance\n'))
3014 ui.write((b'Testing write performance\n'))
2910 timer(write)
3015 timer(write)
2911 fm.end()
3016 fm.end()
2912
3017
2913 def uisetup(ui):
3018 def uisetup(ui):
2914 if (util.safehasattr(cmdutil, b'openrevlog') and
3019 if (util.safehasattr(cmdutil, b'openrevlog') and
2915 not util.safehasattr(commands, b'debugrevlogopts')):
3020 not util.safehasattr(commands, b'debugrevlogopts')):
2916 # for "historical portability":
3021 # for "historical portability":
2917 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3022 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2918 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3023 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2919 # openrevlog() should cause failure, because it has been
3024 # openrevlog() should cause failure, because it has been
2920 # available since 3.5 (or 49c583ca48c4).
3025 # available since 3.5 (or 49c583ca48c4).
2921 def openrevlog(orig, repo, cmd, file_, opts):
3026 def openrevlog(orig, repo, cmd, file_, opts):
2922 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3027 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2923 raise error.Abort(b"This version doesn't support --dir option",
3028 raise error.Abort(b"This version doesn't support --dir option",
2924 hint=b"use 3.5 or later")
3029 hint=b"use 3.5 or later")
2925 return orig(repo, cmd, file_, opts)
3030 return orig(repo, cmd, file_, opts)
2926 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3031 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2927
3032
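uisetup() above relies on extensions.wrapfunction(), Mercurial's standard hook for wrapping an existing function; the wrapper receives the original callable as its first argument. A minimal generic sketch, assuming it lives in an extension's own uisetup() (the target, util.bytecount, is an arbitrary stable function picked for illustration, not something this changeset touches):

    from mercurial import extensions, util

    def countingbytecount(orig, nbytes):
        # do any extra work here, then delegate to the wrapped original
        countingbytecount.calls += 1
        return orig(nbytes)
    countingbytecount.calls = 0

    def uisetup(ui):
        extensions.wrapfunction(util, 'bytecount', countingbytecount)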
2928 @command(b'perfprogress', formatteropts + [
3033 @command(b'perfprogress', formatteropts + [
2929 (b'', b'topic', b'topic', b'topic for progress messages'),
3034 (b'', b'topic', b'topic', b'topic for progress messages'),
2930 (b'c', b'total', 1000000, b'total value we are progressing to'),
3035 (b'c', b'total', 1000000, b'total value we are progressing to'),
2931 ], norepo=True)
3036 ], norepo=True)
2932 def perfprogress(ui, topic=None, total=None, **opts):
3037 def perfprogress(ui, topic=None, total=None, **opts):
2933 """printing of progress bars"""
3038 """printing of progress bars"""
2934 opts = _byteskwargs(opts)
3039 opts = _byteskwargs(opts)
2935
3040
2936 timer, fm = gettimer(ui, opts)
3041 timer, fm = gettimer(ui, opts)
2937
3042
2938 def doprogress():
3043 def doprogress():
2939 with ui.makeprogress(topic, total=total) as progress:
3044 with ui.makeprogress(topic, total=total) as progress:
2940 for i in pycompat.xrange(total):
3045 for i in pycompat.xrange(total):
2941 progress.increment()
3046 progress.increment()
2942
3047
2943 timer(doprogress)
3048 timer(doprogress)
2944 fm.end()
3049 fm.end()
@@ -1,393 +1,396 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from <base> revs to <target>
96 benchmark branchmap update from <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 (no help text available)
108 (no help text available)
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate (no help text available)
110 perfdirstate (no help text available)
111 perfdirstatedirs
111 perfdirstatedirs
112 (no help text available)
112 (no help text available)
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 (no help text available)
114 (no help text available)
115 perfdirstatewrite
115 perfdirstatewrite
116 (no help text available)
116 (no help text available)
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
128 find statistics about potential parameters for
129 'perfmergecopies'
127 perfhelper-pathcopies
130 perfhelper-pathcopies
128 find statistics about potential parameters for the
131 find statistics about potential parameters for the
129 'perftracecopies'
132 'perftracecopies'
130 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
131 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
132 perflinelogedits
135 perflinelogedits
133 (no help text available)
136 (no help text available)
134 perfloadmarkers
137 perfloadmarkers
135 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
136 perflog (no help text available)
139 perflog (no help text available)
137 perflookup (no help text available)
140 perflookup (no help text available)
138 perflrucachedict
141 perflrucachedict
139 (no help text available)
142 (no help text available)
140 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
141 usable
144 usable
142 perfmergecalculate
145 perfmergecalculate
143 (no help text available)
146 (no help text available)
144 perfmergecopies
147 perfmergecopies
145 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
146 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
147 perfnodelookup
150 perfnodelookup
148 (no help text available)
151 (no help text available)
149 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
150 nodemap
153 nodemap
151 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
152 perfpathcopies
155 perfpathcopies
153 benchmark the copy tracing logic
156 benchmark the copy tracing logic
154 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
155 perfphasesremote
158 perfphasesremote
156 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
157 perfprogress printing of progress bars
160 perfprogress printing of progress bars
158 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
159 perfrevlogchunks
162 perfrevlogchunks
160 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
161 perfrevlogindex
164 perfrevlogindex
162 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
163 perfrevlogrevision
166 perfrevlogrevision
164 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
165 perfrevlogrevisions
168 perfrevlogrevisions
166 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
167 perfrevlogwrite
170 perfrevlogwrite
168 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
169 perfrevrange (no help text available)
172 perfrevrange (no help text available)
170 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
171 perfstartup (no help text available)
174 perfstartup (no help text available)
172 perfstatus (no help text available)
175 perfstatus (no help text available)
173 perftags (no help text available)
176 perftags (no help text available)
174 perftemplating
177 perftemplating
175 test the rendering time of a given template
178 test the rendering time of a given template
176 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
177 perfvolatilesets
180 perfvolatilesets
178 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
179 perfwalk (no help text available)
182 perfwalk (no help text available)
180 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
181
184
182 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
183 $ hg perfaddremove
186 $ hg perfaddremove
184 $ hg perfancestors
187 $ hg perfancestors
185 $ hg perfancestorset 2
188 $ hg perfancestorset 2
186 $ hg perfannotate a
189 $ hg perfannotate a
187 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
188 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
189 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
190 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
191 $ hg perfbookmarks
194 $ hg perfbookmarks
192 $ hg perfbranchmap
195 $ hg perfbranchmap
193 $ hg perfbranchmapload
196 $ hg perfbranchmapload
194 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
195 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
196 $ hg perfcca
199 $ hg perfcca
197 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
198 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
199 $ hg perfchangeset 2
202 $ hg perfchangeset 2
200 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
201 $ hg perfdiffwd
204 $ hg perfdiffwd
202 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
203 $ hg perfdirs
206 $ hg perfdirs
204 $ hg perfdirstate
207 $ hg perfdirstate
205 $ hg perfdirstatedirs
208 $ hg perfdirstatedirs
206 $ hg perfdirstatefoldmap
209 $ hg perfdirstatefoldmap
207 $ hg perfdirstatewrite
210 $ hg perfdirstatewrite
208 #if repofncache
211 #if repofncache
209 $ hg perffncacheencode
212 $ hg perffncacheencode
210 $ hg perffncacheload
213 $ hg perffncacheload
211 $ hg debugrebuildfncache
214 $ hg debugrebuildfncache
212 fncache already up to date
215 fncache already up to date
213 $ hg perffncachewrite
216 $ hg perffncachewrite
214 $ hg debugrebuildfncache
217 $ hg debugrebuildfncache
215 fncache already up to date
218 fncache already up to date
216 #endif
219 #endif
217 $ hg perfheads
220 $ hg perfheads
218 $ hg perfignore
221 $ hg perfignore
219 $ hg perfindex
222 $ hg perfindex
220 $ hg perflinelogedits -n 1
223 $ hg perflinelogedits -n 1
221 $ hg perfloadmarkers
224 $ hg perfloadmarkers
222 $ hg perflog
225 $ hg perflog
223 $ hg perflookup 2
226 $ hg perflookup 2
224 $ hg perflrucache
227 $ hg perflrucache
225 $ hg perfmanifest 2
228 $ hg perfmanifest 2
226 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
227 $ hg perfmanifest -m 44fe2c8352bb
230 $ hg perfmanifest -m 44fe2c8352bb
228 abort: manifest revision must be integer or full node
231 abort: manifest revision must be integer or full node
229 [255]
232 [255]
230 $ hg perfmergecalculate -r 3
233 $ hg perfmergecalculate -r 3
231 $ hg perfmoonwalk
234 $ hg perfmoonwalk
232 $ hg perfnodelookup 2
235 $ hg perfnodelookup 2
233 $ hg perfpathcopies 1 2
236 $ hg perfpathcopies 1 2
234 $ hg perfprogress --total 1000
237 $ hg perfprogress --total 1000
235 $ hg perfrawfiles 2
238 $ hg perfrawfiles 2
236 $ hg perfrevlogindex -c
239 $ hg perfrevlogindex -c
237 #if reporevlogstore
240 #if reporevlogstore
238 $ hg perfrevlogrevisions .hg/store/data/a.i
241 $ hg perfrevlogrevisions .hg/store/data/a.i
239 #endif
242 #endif
240 $ hg perfrevlogrevision -m 0
243 $ hg perfrevlogrevision -m 0
241 $ hg perfrevlogchunks -c
244 $ hg perfrevlogchunks -c
242 $ hg perfrevrange
245 $ hg perfrevrange
243 $ hg perfrevset 'all()'
246 $ hg perfrevset 'all()'
244 $ hg perfstartup
247 $ hg perfstartup
245 $ hg perfstatus
248 $ hg perfstatus
246 $ hg perftags
249 $ hg perftags
247 $ hg perftemplating
250 $ hg perftemplating
248 $ hg perfvolatilesets
251 $ hg perfvolatilesets
249 $ hg perfwalk
252 $ hg perfwalk
250 $ hg perfparents
253 $ hg perfparents
251 $ hg perfdiscovery -q .
254 $ hg perfdiscovery -q .
252
255
253 Test run control
256 Test run control
254 ----------------
257 ----------------
255
258
256 Simple single entry
259 Simple single entry
257
260
258 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
259 ! wall * comb * user * sys * (best of 15) (glob)
262 ! wall * comb * user * sys * (best of 15) (glob)
260
263
261 Multiple entries
264 Multiple entries
262
265
263 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
264 ! wall * comb * user * sys * (best of 5) (glob)
267 ! wall * comb * user * sys * (best of 5) (glob)
265
268
266 error cases are ignored
269 error cases are ignored
267
270
268 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
269 malformatted run limit entry, missing "-": 500
272 malformatted run limit entry, missing "-": 500
270 ! wall * comb * user * sys * (best of 5) (glob)
273 ! wall * comb * user * sys * (best of 5) (glob)
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
272 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
273 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
274 ! wall * comb * user * sys * (best of 5) (glob)
277 ! wall * comb * user * sys * (best of 5) (glob)
275 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
276 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
277 ! wall * comb * user * sys * (best of 5) (glob)
280 ! wall * comb * user * sys * (best of 5) (glob)
278
281
279 test actual output
282 test actual output
280 ------------------
283 ------------------
281
284
282 normal output:
285 normal output:
283
286
284 $ hg perfheads --config perf.stub=no
287 $ hg perfheads --config perf.stub=no
285 ! wall * comb * user * sys * (best of *) (glob)
288 ! wall * comb * user * sys * (best of *) (glob)
286
289
287 detailed output:
290 detailed output:
288
291
289 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
290 ! wall * comb * user * sys * (best of *) (glob)
293 ! wall * comb * user * sys * (best of *) (glob)
291 ! wall * comb * user * sys * (max of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
292 ! wall * comb * user * sys * (avg of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
293 ! wall * comb * user * sys * (median of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
294
297
295 test json output
298 test json output
296 ----------------
299 ----------------
297
300
298 normal output:
301 normal output:
299
302
300 $ hg perfheads --template json --config perf.stub=no
303 $ hg perfheads --template json --config perf.stub=no
301 [
304 [
302 {
305 {
303 "comb": *, (glob)
306 "comb": *, (glob)
304 "count": *, (glob)
307 "count": *, (glob)
305 "sys": *, (glob)
308 "sys": *, (glob)
306 "user": *, (glob)
309 "user": *, (glob)
307 "wall": * (glob)
310 "wall": * (glob)
308 }
311 }
309 ]
312 ]
310
313
311 detailed output:
314 detailed output:
312
315
313 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
314 [
317 [
315 {
318 {
316 "avg.comb": *, (glob)
319 "avg.comb": *, (glob)
317 "avg.count": *, (glob)
320 "avg.count": *, (glob)
318 "avg.sys": *, (glob)
321 "avg.sys": *, (glob)
319 "avg.user": *, (glob)
322 "avg.user": *, (glob)
320 "avg.wall": *, (glob)
323 "avg.wall": *, (glob)
321 "comb": *, (glob)
324 "comb": *, (glob)
322 "count": *, (glob)
325 "count": *, (glob)
323 "max.comb": *, (glob)
326 "max.comb": *, (glob)
324 "max.count": *, (glob)
327 "max.count": *, (glob)
325 "max.sys": *, (glob)
328 "max.sys": *, (glob)
326 "max.user": *, (glob)
329 "max.user": *, (glob)
327 "max.wall": *, (glob)
330 "max.wall": *, (glob)
328 "median.comb": *, (glob)
331 "median.comb": *, (glob)
329 "median.count": *, (glob)
332 "median.count": *, (glob)
330 "median.sys": *, (glob)
333 "median.sys": *, (glob)
331 "median.user": *, (glob)
334 "median.user": *, (glob)
332 "median.wall": *, (glob)
335 "median.wall": *, (glob)
333 "sys": *, (glob)
336 "sys": *, (glob)
334 "user": *, (glob)
337 "user": *, (glob)
335 "wall": * (glob)
338 "wall": * (glob)
336 }
339 }
337 ]
340 ]
338
341
339 Test pre-run feature
342 Test pre-run feature
340 --------------------
343 --------------------
341
344
342 (perf discovery has some spurious output)
345 (perf discovery has some spurious output)
343
346
344 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
345 ! wall * comb * user * sys * (best of 1) (glob)
348 ! wall * comb * user * sys * (best of 1) (glob)
346 searching for changes
349 searching for changes
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
348 ! wall * comb * user * sys * (best of 1) (glob)
351 ! wall * comb * user * sys * (best of 1) (glob)
349 searching for changes
352 searching for changes
350 searching for changes
353 searching for changes
351 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
352 ! wall * comb * user * sys * (best of 1) (glob)
355 ! wall * comb * user * sys * (best of 1) (glob)
353 searching for changes
356 searching for changes
354 searching for changes
357 searching for changes
355 searching for changes
358 searching for changes
356 searching for changes
359 searching for changes
357
360
358 test profile-benchmark option
361 test profile-benchmark option
359 ------------------------------
362 ------------------------------
360
363
361 Function to check that statprof ran
364 Function to check that statprof ran
362 $ statprofran () {
365 $ statprofran () {
363 > egrep 'Sample count:|No samples recorded' > /dev/null
366 > egrep 'Sample count:|No samples recorded' > /dev/null
364 > }
367 > }
365 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
366
369
367 Check perf.py for historical portability
370 Check perf.py for historical portability
368 ----------------------------------------
371 ----------------------------------------
369
372
370 $ cd "$TESTDIR/.."
373 $ cd "$TESTDIR/.."
371
374
372 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
373 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
374 > "$TESTDIR"/check-perf-code.py contrib/perf.py
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
375 contrib/perf.py:\d+: (re)
378 contrib/perf.py:\d+: (re)
376 > from mercurial import (
379 > from mercurial import (
377 import newer module separately in try clause for early Mercurial
380 import newer module separately in try clause for early Mercurial
378 contrib/perf.py:\d+: (re)
381 contrib/perf.py:\d+: (re)
379 > from mercurial import (
382 > from mercurial import (
380 import newer module separately in try clause for early Mercurial
383 import newer module separately in try clause for early Mercurial
381 contrib/perf.py:\d+: (re)
384 contrib/perf.py:\d+: (re)
382 > origindexpath = orig.opener.join(orig.indexfile)
385 > origindexpath = orig.opener.join(orig.indexfile)
383 use getvfs()/getsvfs() for early Mercurial
386 use getvfs()/getsvfs() for early Mercurial
384 contrib/perf.py:\d+: (re)
387 contrib/perf.py:\d+: (re)
385 > origdatapath = orig.opener.join(orig.datafile)
388 > origdatapath = orig.opener.join(orig.datafile)
386 use getvfs()/getsvfs() for early Mercurial
389 use getvfs()/getsvfs() for early Mercurial
387 contrib/perf.py:\d+: (re)
390 contrib/perf.py:\d+: (re)
388 > vfs = vfsmod.vfs(tmpdir)
391 > vfs = vfsmod.vfs(tmpdir)
389 use getvfs()/getsvfs() for early Mercurial
392 use getvfs()/getsvfs() for early Mercurial
390 contrib/perf.py:\d+: (re)
393 contrib/perf.py:\d+: (re)
391 > vfs.options = getattr(orig.opener, 'options', None)
394 > vfs.options = getattr(orig.opener, 'options', None)
392 use getvfs()/getsvfs() for early Mercurial
395 use getvfs()/getsvfs() for early Mercurial
393 [1]
396 [1]