##// END OF EJS Templates
# Provenance: Mercurial changeset r42575:3a3592b4 (default branch), author marmoute.
# Commit message: "perf: factor selection of revisions involved in the merge out..."
@@ -1,2924 +1,2926 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
from __future__ import absolute_import

import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
121 def identity(a):
121 def identity(a):
122 return a
122 return a
123
123
124 try:
124 try:
125 from mercurial import pycompat
125 from mercurial import pycompat
126 getargspec = pycompat.getargspec # added to module after 4.5
126 getargspec = pycompat.getargspec # added to module after 4.5
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 if pycompat.ispy3:
131 if pycompat.ispy3:
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 else:
133 else:
134 _maxint = sys.maxint
134 _maxint = sys.maxint
135 except (ImportError, AttributeError):
135 except (ImportError, AttributeError):
136 import inspect
136 import inspect
137 getargspec = inspect.getargspec
137 getargspec = inspect.getargspec
138 _byteskwargs = identity
138 _byteskwargs = identity
139 fsencode = identity # no py3 support
139 fsencode = identity # no py3 support
140 _maxint = sys.maxint # no py3 support
140 _maxint = sys.maxint # no py3 support
141 _sysstr = lambda x: x # no py3 support
141 _sysstr = lambda x: x # no py3 support
142 _xrange = xrange
142 _xrange = xrange
143
143
144 try:
144 try:
145 # 4.7+
145 # 4.7+
146 queue = pycompat.queue.Queue
146 queue = pycompat.queue.Queue
147 except (AttributeError, ImportError):
147 except (AttributeError, ImportError):
148 # <4.7.
148 # <4.7.
149 try:
149 try:
150 queue = pycompat.queue
150 queue = pycompat.queue
151 except (AttributeError, ImportError):
151 except (AttributeError, ImportError):
152 queue = util.queue
152 queue = util.queue
153
153
154 try:
154 try:
155 from mercurial import logcmdutil
155 from mercurial import logcmdutil
156 makelogtemplater = logcmdutil.maketemplater
156 makelogtemplater = logcmdutil.maketemplater
157 except (AttributeError, ImportError):
157 except (AttributeError, ImportError):
158 try:
158 try:
159 makelogtemplater = cmdutil.makelogtemplater
159 makelogtemplater = cmdutil.makelogtemplater
160 except (AttributeError, ImportError):
160 except (AttributeError, ImportError):
161 makelogtemplater = None
161 makelogtemplater = None
162
162
163 # for "historical portability":
163 # for "historical portability":
164 # define util.safehasattr forcibly, because util.safehasattr has been
164 # define util.safehasattr forcibly, because util.safehasattr has been
165 # available since 1.9.3 (or 94b200a11cf7)
165 # available since 1.9.3 (or 94b200a11cf7)
166 _undefined = object()
166 _undefined = object()
167 def safehasattr(thing, attr):
167 def safehasattr(thing, attr):
168 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
168 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
169 setattr(util, 'safehasattr', safehasattr)
169 setattr(util, 'safehasattr', safehasattr)
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.timer forcibly, because util.timer has been available
172 # define util.timer forcibly, because util.timer has been available
173 # since ae5d60bb70c9
173 # since ae5d60bb70c9
174 if safehasattr(time, 'perf_counter'):
174 if safehasattr(time, 'perf_counter'):
175 util.timer = time.perf_counter
175 util.timer = time.perf_counter
176 elif os.name == b'nt':
176 elif os.name == b'nt':
177 util.timer = time.clock
177 util.timer = time.clock
178 else:
178 else:
179 util.timer = time.time
179 util.timer = time.time
180
180
181 # for "historical portability":
181 # for "historical portability":
182 # use locally defined empty option list, if formatteropts isn't
182 # use locally defined empty option list, if formatteropts isn't
183 # available, because commands.formatteropts has been available since
183 # available, because commands.formatteropts has been available since
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 # available since 2.2 (or ae5f92e154d3)
185 # available since 2.2 (or ae5f92e154d3)
186 formatteropts = getattr(cmdutil, "formatteropts",
186 formatteropts = getattr(cmdutil, "formatteropts",
187 getattr(commands, "formatteropts", []))
187 getattr(commands, "formatteropts", []))
188
188
189 # for "historical portability":
189 # for "historical portability":
190 # use locally defined option list, if debugrevlogopts isn't available,
190 # use locally defined option list, if debugrevlogopts isn't available,
191 # because commands.debugrevlogopts has been available since 3.7 (or
191 # because commands.debugrevlogopts has been available since 3.7 (or
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 # since 1.9 (or a79fea6b3e77).
193 # since 1.9 (or a79fea6b3e77).
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 getattr(commands, "debugrevlogopts", [
195 getattr(commands, "debugrevlogopts", [
196 (b'c', b'changelog', False, (b'open changelog')),
196 (b'c', b'changelog', False, (b'open changelog')),
197 (b'm', b'manifest', False, (b'open manifest')),
197 (b'm', b'manifest', False, (b'open manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
198 (b'', b'dir', False, (b'open directory manifest')),
199 ]))
199 ]))
200
200
201 cmdtable = {}
201 cmdtable = {}
202
202
203 # for "historical portability":
203 # for "historical portability":
204 # define parsealiases locally, because cmdutil.parsealiases has been
204 # define parsealiases locally, because cmdutil.parsealiases has been
205 # available since 1.5 (or 6252852b4332)
205 # available since 1.5 (or 6252852b4332)
206 def parsealiases(cmd):
206 def parsealiases(cmd):
207 return cmd.split(b"|")
207 return cmd.split(b"|")
208
208
209 if safehasattr(registrar, 'command'):
209 if safehasattr(registrar, 'command'):
210 command = registrar.command(cmdtable)
210 command = registrar.command(cmdtable)
211 elif safehasattr(cmdutil, 'command'):
211 elif safehasattr(cmdutil, 'command'):
212 command = cmdutil.command(cmdtable)
212 command = cmdutil.command(cmdtable)
213 if b'norepo' not in getargspec(command).args:
213 if b'norepo' not in getargspec(command).args:
214 # for "historical portability":
214 # for "historical portability":
215 # wrap original cmdutil.command, because "norepo" option has
215 # wrap original cmdutil.command, because "norepo" option has
216 # been available since 3.1 (or 75a96326cecb)
216 # been available since 3.1 (or 75a96326cecb)
217 _command = command
217 _command = command
218 def command(name, options=(), synopsis=None, norepo=False):
218 def command(name, options=(), synopsis=None, norepo=False):
219 if norepo:
219 if norepo:
220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 return _command(name, list(options), synopsis)
221 return _command(name, list(options), synopsis)
222 else:
222 else:
223 # for "historical portability":
223 # for "historical portability":
224 # define "@command" annotation locally, because cmdutil.command
224 # define "@command" annotation locally, because cmdutil.command
225 # has been available since 1.9 (or 2daa5179e73f)
225 # has been available since 1.9 (or 2daa5179e73f)
226 def command(name, options=(), synopsis=None, norepo=False):
226 def command(name, options=(), synopsis=None, norepo=False):
227 def decorator(func):
227 def decorator(func):
228 if synopsis:
228 if synopsis:
229 cmdtable[name] = func, list(options), synopsis
229 cmdtable[name] = func, list(options), synopsis
230 else:
230 else:
231 cmdtable[name] = func, list(options)
231 cmdtable[name] = func, list(options)
232 if norepo:
232 if norepo:
233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 return func
234 return func
235 return decorator
235 return decorator
236
236
237 try:
237 try:
238 import mercurial.registrar
238 import mercurial.registrar
239 import mercurial.configitems
239 import mercurial.configitems
240 configtable = {}
240 configtable = {}
241 configitem = mercurial.registrar.configitem(configtable)
241 configitem = mercurial.registrar.configitem(configtable)
242 configitem(b'perf', b'presleep',
242 configitem(b'perf', b'presleep',
243 default=mercurial.configitems.dynamicdefault,
243 default=mercurial.configitems.dynamicdefault,
244 )
244 )
245 configitem(b'perf', b'stub',
245 configitem(b'perf', b'stub',
246 default=mercurial.configitems.dynamicdefault,
246 default=mercurial.configitems.dynamicdefault,
247 )
247 )
248 configitem(b'perf', b'parentscount',
248 configitem(b'perf', b'parentscount',
249 default=mercurial.configitems.dynamicdefault,
249 default=mercurial.configitems.dynamicdefault,
250 )
250 )
251 configitem(b'perf', b'all-timing',
251 configitem(b'perf', b'all-timing',
252 default=mercurial.configitems.dynamicdefault,
252 default=mercurial.configitems.dynamicdefault,
253 )
253 )
254 configitem(b'perf', b'pre-run',
254 configitem(b'perf', b'pre-run',
255 default=mercurial.configitems.dynamicdefault,
255 default=mercurial.configitems.dynamicdefault,
256 )
256 )
257 configitem(b'perf', b'profile-benchmark',
257 configitem(b'perf', b'profile-benchmark',
258 default=mercurial.configitems.dynamicdefault,
258 default=mercurial.configitems.dynamicdefault,
259 )
259 )
260 configitem(b'perf', b'run-limits',
260 configitem(b'perf', b'run-limits',
261 default=mercurial.configitems.dynamicdefault,
261 default=mercurial.configitems.dynamicdefault,
262 )
262 )
263 except (ImportError, AttributeError):
263 except (ImportError, AttributeError):
264 pass
264 pass
265
265
266 def getlen(ui):
266 def getlen(ui):
267 if ui.configbool(b"perf", b"stub", False):
267 if ui.configbool(b"perf", b"stub", False):
268 return lambda x: 1
268 return lambda x: 1
269 return len
269 return len
270
270
271 class noop(object):
271 class noop(object):
272 """dummy context manager"""
272 """dummy context manager"""
273 def __enter__(self):
273 def __enter__(self):
274 pass
274 pass
275 def __exit__(self, *args):
275 def __exit__(self, *args):
276 pass
276 pass
277
277
278 NOOPCTX = noop()
278 NOOPCTX = noop()
279
279
280 def gettimer(ui, opts=None):
280 def gettimer(ui, opts=None):
281 """return a timer function and formatter: (timer, formatter)
281 """return a timer function and formatter: (timer, formatter)
282
282
283 This function exists to gather the creation of formatter in a single
283 This function exists to gather the creation of formatter in a single
284 place instead of duplicating it in all performance commands."""
284 place instead of duplicating it in all performance commands."""
285
285
286 # enforce an idle period before execution to counteract power management
286 # enforce an idle period before execution to counteract power management
287 # experimental config: perf.presleep
287 # experimental config: perf.presleep
288 time.sleep(getint(ui, b"perf", b"presleep", 1))
288 time.sleep(getint(ui, b"perf", b"presleep", 1))
289
289
290 if opts is None:
290 if opts is None:
291 opts = {}
291 opts = {}
292 # redirect all to stderr unless buffer api is in use
292 # redirect all to stderr unless buffer api is in use
293 if not ui._buffers:
293 if not ui._buffers:
294 ui = ui.copy()
294 ui = ui.copy()
295 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
295 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
296 if uifout:
296 if uifout:
297 # for "historical portability":
297 # for "historical portability":
298 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
298 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
299 uifout.set(ui.ferr)
299 uifout.set(ui.ferr)
300
300
301 # get a formatter
301 # get a formatter
302 uiformatter = getattr(ui, 'formatter', None)
302 uiformatter = getattr(ui, 'formatter', None)
303 if uiformatter:
303 if uiformatter:
304 fm = uiformatter(b'perf', opts)
304 fm = uiformatter(b'perf', opts)
305 else:
305 else:
306 # for "historical portability":
306 # for "historical portability":
307 # define formatter locally, because ui.formatter has been
307 # define formatter locally, because ui.formatter has been
308 # available since 2.2 (or ae5f92e154d3)
308 # available since 2.2 (or ae5f92e154d3)
309 from mercurial import node
309 from mercurial import node
310 class defaultformatter(object):
310 class defaultformatter(object):
311 """Minimized composition of baseformatter and plainformatter
311 """Minimized composition of baseformatter and plainformatter
312 """
312 """
313 def __init__(self, ui, topic, opts):
313 def __init__(self, ui, topic, opts):
314 self._ui = ui
314 self._ui = ui
315 if ui.debugflag:
315 if ui.debugflag:
316 self.hexfunc = node.hex
316 self.hexfunc = node.hex
317 else:
317 else:
318 self.hexfunc = node.short
318 self.hexfunc = node.short
319 def __nonzero__(self):
319 def __nonzero__(self):
320 return False
320 return False
321 __bool__ = __nonzero__
321 __bool__ = __nonzero__
322 def startitem(self):
322 def startitem(self):
323 pass
323 pass
324 def data(self, **data):
324 def data(self, **data):
325 pass
325 pass
326 def write(self, fields, deftext, *fielddata, **opts):
326 def write(self, fields, deftext, *fielddata, **opts):
327 self._ui.write(deftext % fielddata, **opts)
327 self._ui.write(deftext % fielddata, **opts)
328 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
328 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
329 if cond:
329 if cond:
330 self._ui.write(deftext % fielddata, **opts)
330 self._ui.write(deftext % fielddata, **opts)
331 def plain(self, text, **opts):
331 def plain(self, text, **opts):
332 self._ui.write(text, **opts)
332 self._ui.write(text, **opts)
333 def end(self):
333 def end(self):
334 pass
334 pass
335 fm = defaultformatter(ui, b'perf', opts)
335 fm = defaultformatter(ui, b'perf', opts)
336
336
337 # stub function, runs code only once instead of in a loop
337 # stub function, runs code only once instead of in a loop
338 # experimental config: perf.stub
338 # experimental config: perf.stub
339 if ui.configbool(b"perf", b"stub", False):
339 if ui.configbool(b"perf", b"stub", False):
340 return functools.partial(stub_timer, fm), fm
340 return functools.partial(stub_timer, fm), fm
341
341
342 # experimental config: perf.all-timing
342 # experimental config: perf.all-timing
343 displayall = ui.configbool(b"perf", b"all-timing", False)
343 displayall = ui.configbool(b"perf", b"all-timing", False)
344
344
345 # experimental config: perf.run-limits
345 # experimental config: perf.run-limits
346 limitspec = ui.configlist(b"perf", b"run-limits", [])
346 limitspec = ui.configlist(b"perf", b"run-limits", [])
347 limits = []
347 limits = []
348 for item in limitspec:
348 for item in limitspec:
349 parts = item.split(b'-', 1)
349 parts = item.split(b'-', 1)
350 if len(parts) < 2:
350 if len(parts) < 2:
351 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
351 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
352 % item))
352 % item))
353 continue
353 continue
354 try:
354 try:
355 time_limit = float(pycompat.sysstr(parts[0]))
355 time_limit = float(pycompat.sysstr(parts[0]))
356 except ValueError as e:
356 except ValueError as e:
357 ui.warn((b'malformatted run limit entry, %s: %s\n'
357 ui.warn((b'malformatted run limit entry, %s: %s\n'
358 % (pycompat.bytestr(e), item)))
358 % (pycompat.bytestr(e), item)))
359 continue
359 continue
360 try:
360 try:
361 run_limit = int(pycompat.sysstr(parts[1]))
361 run_limit = int(pycompat.sysstr(parts[1]))
362 except ValueError as e:
362 except ValueError as e:
363 ui.warn((b'malformatted run limit entry, %s: %s\n'
363 ui.warn((b'malformatted run limit entry, %s: %s\n'
364 % (pycompat.bytestr(e), item)))
364 % (pycompat.bytestr(e), item)))
365 continue
365 continue
366 limits.append((time_limit, run_limit))
366 limits.append((time_limit, run_limit))
367 if not limits:
367 if not limits:
368 limits = DEFAULTLIMITS
368 limits = DEFAULTLIMITS
369
369
370 profiler = None
370 profiler = None
371 if profiling is not None:
371 if profiling is not None:
372 if ui.configbool(b"perf", b"profile-benchmark", False):
372 if ui.configbool(b"perf", b"profile-benchmark", False):
373 profiler = profiling.profile(ui)
373 profiler = profiling.profile(ui)
374
374
375 prerun = getint(ui, b"perf", b"pre-run", 0)
375 prerun = getint(ui, b"perf", b"pre-run", 0)
376 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
376 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
377 prerun=prerun, profiler=profiler)
377 prerun=prerun, profiler=profiler)
378 return t, fm
378 return t, fm
379
379
380 def stub_timer(fm, func, setup=None, title=None):
380 def stub_timer(fm, func, setup=None, title=None):
381 if setup is not None:
381 if setup is not None:
382 setup()
382 setup()
383 func()
383 func()
384
384
385 @contextlib.contextmanager
385 @contextlib.contextmanager
386 def timeone():
386 def timeone():
387 r = []
387 r = []
388 ostart = os.times()
388 ostart = os.times()
389 cstart = util.timer()
389 cstart = util.timer()
390 yield r
390 yield r
391 cstop = util.timer()
391 cstop = util.timer()
392 ostop = os.times()
392 ostop = os.times()
393 a, b = ostart, ostop
393 a, b = ostart, ostop
394 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
394 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
395
395
396
396
397 # list of stop condition (elapsed time, minimal run count)
397 # list of stop condition (elapsed time, minimal run count)
398 DEFAULTLIMITS = (
398 DEFAULTLIMITS = (
399 (3.0, 100),
399 (3.0, 100),
400 (10.0, 3),
400 (10.0, 3),
401 )
401 )
402
402
403 def _timer(fm, func, setup=None, title=None, displayall=False,
403 def _timer(fm, func, setup=None, title=None, displayall=False,
404 limits=DEFAULTLIMITS, prerun=0, profiler=None):
404 limits=DEFAULTLIMITS, prerun=0, profiler=None):
405 gc.collect()
405 gc.collect()
406 results = []
406 results = []
407 begin = util.timer()
407 begin = util.timer()
408 count = 0
408 count = 0
409 if profiler is None:
409 if profiler is None:
410 profiler = NOOPCTX
410 profiler = NOOPCTX
411 for i in range(prerun):
411 for i in range(prerun):
412 if setup is not None:
412 if setup is not None:
413 setup()
413 setup()
414 func()
414 func()
415 keepgoing = True
415 keepgoing = True
416 while keepgoing:
416 while keepgoing:
417 if setup is not None:
417 if setup is not None:
418 setup()
418 setup()
419 with profiler:
419 with profiler:
420 with timeone() as item:
420 with timeone() as item:
421 r = func()
421 r = func()
422 profiler = NOOPCTX
422 profiler = NOOPCTX
423 count += 1
423 count += 1
424 results.append(item[0])
424 results.append(item[0])
425 cstop = util.timer()
425 cstop = util.timer()
426 # Look for a stop condition.
426 # Look for a stop condition.
427 elapsed = cstop - begin
427 elapsed = cstop - begin
428 for t, mincount in limits:
428 for t, mincount in limits:
429 if elapsed >= t and count >= mincount:
429 if elapsed >= t and count >= mincount:
430 keepgoing = False
430 keepgoing = False
431 break
431 break
432
432
433 formatone(fm, results, title=title, result=r,
433 formatone(fm, results, title=title, result=r,
434 displayall=displayall)
434 displayall=displayall)
435
435
436 def formatone(fm, timings, title=None, result=None, displayall=False):
436 def formatone(fm, timings, title=None, result=None, displayall=False):
437
437
438 count = len(timings)
438 count = len(timings)
439
439
440 fm.startitem()
440 fm.startitem()
441
441
442 if title:
442 if title:
443 fm.write(b'title', b'! %s\n', title)
443 fm.write(b'title', b'! %s\n', title)
444 if result:
444 if result:
445 fm.write(b'result', b'! result: %s\n', result)
445 fm.write(b'result', b'! result: %s\n', result)
446 def display(role, entry):
446 def display(role, entry):
447 prefix = b''
447 prefix = b''
448 if role != b'best':
448 if role != b'best':
449 prefix = b'%s.' % role
449 prefix = b'%s.' % role
450 fm.plain(b'!')
450 fm.plain(b'!')
451 fm.write(prefix + b'wall', b' wall %f', entry[0])
451 fm.write(prefix + b'wall', b' wall %f', entry[0])
452 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
452 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
453 fm.write(prefix + b'user', b' user %f', entry[1])
453 fm.write(prefix + b'user', b' user %f', entry[1])
454 fm.write(prefix + b'sys', b' sys %f', entry[2])
454 fm.write(prefix + b'sys', b' sys %f', entry[2])
455 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
455 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
456 fm.plain(b'\n')
456 fm.plain(b'\n')
457 timings.sort()
457 timings.sort()
458 min_val = timings[0]
458 min_val = timings[0]
459 display(b'best', min_val)
459 display(b'best', min_val)
460 if displayall:
460 if displayall:
461 max_val = timings[-1]
461 max_val = timings[-1]
462 display(b'max', max_val)
462 display(b'max', max_val)
463 avg = tuple([sum(x) / count for x in zip(*timings)])
463 avg = tuple([sum(x) / count for x in zip(*timings)])
464 display(b'avg', avg)
464 display(b'avg', avg)
465 median = timings[len(timings) // 2]
465 median = timings[len(timings) // 2]
466 display(b'median', median)
466 display(b'median', median)
467
467
468 # utilities for historical portability
468 # utilities for historical portability
469
469
470 def getint(ui, section, name, default):
470 def getint(ui, section, name, default):
471 # for "historical portability":
471 # for "historical portability":
472 # ui.configint has been available since 1.9 (or fa2b596db182)
472 # ui.configint has been available since 1.9 (or fa2b596db182)
473 v = ui.config(section, name, None)
473 v = ui.config(section, name, None)
474 if v is None:
474 if v is None:
475 return default
475 return default
476 try:
476 try:
477 return int(v)
477 return int(v)
478 except ValueError:
478 except ValueError:
479 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
479 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
480 % (section, name, v))
480 % (section, name, v))
481
481
482 def safeattrsetter(obj, name, ignoremissing=False):
482 def safeattrsetter(obj, name, ignoremissing=False):
483 """Ensure that 'obj' has 'name' attribute before subsequent setattr
483 """Ensure that 'obj' has 'name' attribute before subsequent setattr
484
484
485 This function is aborted, if 'obj' doesn't have 'name' attribute
485 This function is aborted, if 'obj' doesn't have 'name' attribute
486 at runtime. This avoids overlooking removal of an attribute, which
486 at runtime. This avoids overlooking removal of an attribute, which
487 breaks assumption of performance measurement, in the future.
487 breaks assumption of performance measurement, in the future.
488
488
489 This function returns the object to (1) assign a new value, and
489 This function returns the object to (1) assign a new value, and
490 (2) restore an original value to the attribute.
490 (2) restore an original value to the attribute.
491
491
492 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
492 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
493 abortion, and this function returns None. This is useful to
493 abortion, and this function returns None. This is useful to
494 examine an attribute, which isn't ensured in all Mercurial
494 examine an attribute, which isn't ensured in all Mercurial
495 versions.
495 versions.
496 """
496 """
497 if not util.safehasattr(obj, name):
497 if not util.safehasattr(obj, name):
498 if ignoremissing:
498 if ignoremissing:
499 return None
499 return None
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
500 raise error.Abort((b"missing attribute %s of %s might break assumption"
501 b" of performance measurement") % (name, obj))
501 b" of performance measurement") % (name, obj))
502
502
503 origvalue = getattr(obj, _sysstr(name))
503 origvalue = getattr(obj, _sysstr(name))
504 class attrutil(object):
504 class attrutil(object):
505 def set(self, newvalue):
505 def set(self, newvalue):
506 setattr(obj, _sysstr(name), newvalue)
506 setattr(obj, _sysstr(name), newvalue)
507 def restore(self):
507 def restore(self):
508 setattr(obj, _sysstr(name), origvalue)
508 setattr(obj, _sysstr(name), origvalue)
509
509
510 return attrutil()
510 return attrutil()
511
511
512 # utilities to examine each internal API changes
512 # utilities to examine each internal API changes
513
513
514 def getbranchmapsubsettable():
514 def getbranchmapsubsettable():
515 # for "historical portability":
515 # for "historical portability":
516 # subsettable is defined in:
516 # subsettable is defined in:
517 # - branchmap since 2.9 (or 175c6fd8cacc)
517 # - branchmap since 2.9 (or 175c6fd8cacc)
518 # - repoview since 2.5 (or 59a9f18d4587)
518 # - repoview since 2.5 (or 59a9f18d4587)
519 # - repoviewutil since 5.0
519 # - repoviewutil since 5.0
520 for mod in (branchmap, repoview, repoviewutil):
520 for mod in (branchmap, repoview, repoviewutil):
521 subsettable = getattr(mod, 'subsettable', None)
521 subsettable = getattr(mod, 'subsettable', None)
522 if subsettable:
522 if subsettable:
523 return subsettable
523 return subsettable
524
524
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
525 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
526 # branchmap and repoview modules exist, but subsettable attribute
526 # branchmap and repoview modules exist, but subsettable attribute
527 # doesn't)
527 # doesn't)
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
528 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
529 hint=b"use 2.5 or later")
529 hint=b"use 2.5 or later")
530
530
531 def getsvfs(repo):
531 def getsvfs(repo):
532 """Return appropriate object to access files under .hg/store
532 """Return appropriate object to access files under .hg/store
533 """
533 """
534 # for "historical portability":
534 # for "historical portability":
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
535 # repo.svfs has been available since 2.3 (or 7034365089bf)
536 svfs = getattr(repo, 'svfs', None)
536 svfs = getattr(repo, 'svfs', None)
537 if svfs:
537 if svfs:
538 return svfs
538 return svfs
539 else:
539 else:
540 return getattr(repo, 'sopener')
540 return getattr(repo, 'sopener')
541
541
542 def getvfs(repo):
542 def getvfs(repo):
543 """Return appropriate object to access files under .hg
543 """Return appropriate object to access files under .hg
544 """
544 """
545 # for "historical portability":
545 # for "historical portability":
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
546 # repo.vfs has been available since 2.3 (or 7034365089bf)
547 vfs = getattr(repo, 'vfs', None)
547 vfs = getattr(repo, 'vfs', None)
548 if vfs:
548 if vfs:
549 return vfs
549 return vfs
550 else:
550 else:
551 return getattr(repo, 'opener')
551 return getattr(repo, 'opener')
552
552
553 def repocleartagscachefunc(repo):
553 def repocleartagscachefunc(repo):
554 """Return the function to clear tags cache according to repo internal API
554 """Return the function to clear tags cache according to repo internal API
555 """
555 """
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
556 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
557 # in this case, setattr(repo, '_tagscache', None) or so isn't
558 # correct way to clear tags cache, because existing code paths
558 # correct way to clear tags cache, because existing code paths
559 # expect _tagscache to be a structured object.
559 # expect _tagscache to be a structured object.
560 def clearcache():
560 def clearcache():
561 # _tagscache has been filteredpropertycache since 2.5 (or
561 # _tagscache has been filteredpropertycache since 2.5 (or
562 # 98c867ac1330), and delattr() can't work in such case
562 # 98c867ac1330), and delattr() can't work in such case
563 if b'_tagscache' in vars(repo):
563 if b'_tagscache' in vars(repo):
564 del repo.__dict__[b'_tagscache']
564 del repo.__dict__[b'_tagscache']
565 return clearcache
565 return clearcache
566
566
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
567 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
568 if repotags: # since 1.4 (or 5614a628d173)
568 if repotags: # since 1.4 (or 5614a628d173)
569 return lambda : repotags.set(None)
569 return lambda : repotags.set(None)
570
570
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
571 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
572 if repotagscache: # since 0.6 (or d7df759d0e97)
573 return lambda : repotagscache.set(None)
573 return lambda : repotagscache.set(None)
574
574
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
575 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
576 # this point, but it isn't so problematic, because:
576 # this point, but it isn't so problematic, because:
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
577 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
578 # in perftags() causes failure soon
578 # in perftags() causes failure soon
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
579 # - perf.py itself has been available since 1.1 (or eb240755386d)
580 raise error.Abort((b"tags API of this hg command is unknown"))
580 raise error.Abort((b"tags API of this hg command is unknown"))
581
581
582 # utilities to clear cache
582 # utilities to clear cache
583
583
584 def clearfilecache(obj, attrname):
584 def clearfilecache(obj, attrname):
585 unfiltered = getattr(obj, 'unfiltered', None)
585 unfiltered = getattr(obj, 'unfiltered', None)
586 if unfiltered is not None:
586 if unfiltered is not None:
587 obj = obj.unfiltered()
587 obj = obj.unfiltered()
588 if attrname in vars(obj):
588 if attrname in vars(obj):
589 delattr(obj, attrname)
589 delattr(obj, attrname)
590 obj._filecache.pop(attrname, None)
590 obj._filecache.pop(attrname, None)
591
591
592 def clearchangelog(repo):
592 def clearchangelog(repo):
593 if repo is not repo.unfiltered():
593 if repo is not repo.unfiltered():
594 object.__setattr__(repo, r'_clcachekey', None)
594 object.__setattr__(repo, r'_clcachekey', None)
595 object.__setattr__(repo, r'_clcache', None)
595 object.__setattr__(repo, r'_clcache', None)
596 clearfilecache(repo.unfiltered(), 'changelog')
596 clearfilecache(repo.unfiltered(), 'changelog')
597
597
598 # perf commands
598 # perf commands
599
599
600 @command(b'perfwalk', formatteropts)
600 @command(b'perfwalk', formatteropts)
601 def perfwalk(ui, repo, *pats, **opts):
601 def perfwalk(ui, repo, *pats, **opts):
602 opts = _byteskwargs(opts)
602 opts = _byteskwargs(opts)
603 timer, fm = gettimer(ui, opts)
603 timer, fm = gettimer(ui, opts)
604 m = scmutil.match(repo[None], pats, {})
604 m = scmutil.match(repo[None], pats, {})
605 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
605 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
606 ignored=False))))
606 ignored=False))))
607 fm.end()
607 fm.end()
608
608
609 @command(b'perfannotate', formatteropts)
609 @command(b'perfannotate', formatteropts)
610 def perfannotate(ui, repo, f, **opts):
610 def perfannotate(ui, repo, f, **opts):
611 opts = _byteskwargs(opts)
611 opts = _byteskwargs(opts)
612 timer, fm = gettimer(ui, opts)
612 timer, fm = gettimer(ui, opts)
613 fc = repo[b'.'][f]
613 fc = repo[b'.'][f]
614 timer(lambda: len(fc.annotate(True)))
614 timer(lambda: len(fc.annotate(True)))
615 fm.end()
615 fm.end()
616
616
617 @command(b'perfstatus',
617 @command(b'perfstatus',
618 [(b'u', b'unknown', False,
618 [(b'u', b'unknown', False,
619 b'ask status to look for unknown files')] + formatteropts)
619 b'ask status to look for unknown files')] + formatteropts)
620 def perfstatus(ui, repo, **opts):
620 def perfstatus(ui, repo, **opts):
621 opts = _byteskwargs(opts)
621 opts = _byteskwargs(opts)
622 #m = match.always(repo.root, repo.getcwd())
622 #m = match.always(repo.root, repo.getcwd())
623 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
623 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
624 # False))))
624 # False))))
625 timer, fm = gettimer(ui, opts)
625 timer, fm = gettimer(ui, opts)
626 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
626 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
627 fm.end()
627 fm.end()
628
628
629 @command(b'perfaddremove', formatteropts)
629 @command(b'perfaddremove', formatteropts)
630 def perfaddremove(ui, repo, **opts):
630 def perfaddremove(ui, repo, **opts):
631 opts = _byteskwargs(opts)
631 opts = _byteskwargs(opts)
632 timer, fm = gettimer(ui, opts)
632 timer, fm = gettimer(ui, opts)
633 try:
633 try:
634 oldquiet = repo.ui.quiet
634 oldquiet = repo.ui.quiet
635 repo.ui.quiet = True
635 repo.ui.quiet = True
636 matcher = scmutil.match(repo[None])
636 matcher = scmutil.match(repo[None])
637 opts[b'dry_run'] = True
637 opts[b'dry_run'] = True
638 if b'uipathfn' in getargspec(scmutil.addremove).args:
638 if b'uipathfn' in getargspec(scmutil.addremove).args:
639 uipathfn = scmutil.getuipathfn(repo)
639 uipathfn = scmutil.getuipathfn(repo)
640 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
640 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
641 else:
641 else:
642 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
642 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
643 finally:
643 finally:
644 repo.ui.quiet = oldquiet
644 repo.ui.quiet = oldquiet
645 fm.end()
645 fm.end()
646
646
647 def clearcaches(cl):
647 def clearcaches(cl):
648 # behave somewhat consistently across internal API changes
648 # behave somewhat consistently across internal API changes
649 if util.safehasattr(cl, b'clearcaches'):
649 if util.safehasattr(cl, b'clearcaches'):
650 cl.clearcaches()
650 cl.clearcaches()
651 elif util.safehasattr(cl, b'_nodecache'):
651 elif util.safehasattr(cl, b'_nodecache'):
652 from mercurial.node import nullid, nullrev
652 from mercurial.node import nullid, nullrev
653 cl._nodecache = {nullid: nullrev}
653 cl._nodecache = {nullid: nullrev}
654 cl._nodepos = None
654 cl._nodepos = None
655
655
656 @command(b'perfheads', formatteropts)
656 @command(b'perfheads', formatteropts)
657 def perfheads(ui, repo, **opts):
657 def perfheads(ui, repo, **opts):
658 """benchmark the computation of a changelog heads"""
658 """benchmark the computation of a changelog heads"""
659 opts = _byteskwargs(opts)
659 opts = _byteskwargs(opts)
660 timer, fm = gettimer(ui, opts)
660 timer, fm = gettimer(ui, opts)
661 cl = repo.changelog
661 cl = repo.changelog
662 def s():
662 def s():
663 clearcaches(cl)
663 clearcaches(cl)
664 def d():
664 def d():
665 len(cl.headrevs())
665 len(cl.headrevs())
666 timer(d, setup=s)
666 timer(d, setup=s)
667 fm.end()
667 fm.end()
668
668
669 @command(b'perftags', formatteropts+
669 @command(b'perftags', formatteropts+
670 [
670 [
671 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
671 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
672 ])
672 ])
673 def perftags(ui, repo, **opts):
673 def perftags(ui, repo, **opts):
674 opts = _byteskwargs(opts)
674 opts = _byteskwargs(opts)
675 timer, fm = gettimer(ui, opts)
675 timer, fm = gettimer(ui, opts)
676 repocleartagscache = repocleartagscachefunc(repo)
676 repocleartagscache = repocleartagscachefunc(repo)
677 clearrevlogs = opts[b'clear_revlogs']
677 clearrevlogs = opts[b'clear_revlogs']
678 def s():
678 def s():
679 if clearrevlogs:
679 if clearrevlogs:
680 clearchangelog(repo)
680 clearchangelog(repo)
681 clearfilecache(repo.unfiltered(), 'manifest')
681 clearfilecache(repo.unfiltered(), 'manifest')
682 repocleartagscache()
682 repocleartagscache()
683 def t():
683 def t():
684 return len(repo.tags())
684 return len(repo.tags())
685 timer(t, setup=s)
685 timer(t, setup=s)
686 fm.end()
686 fm.end()
687
687
688 @command(b'perfancestors', formatteropts)
688 @command(b'perfancestors', formatteropts)
689 def perfancestors(ui, repo, **opts):
689 def perfancestors(ui, repo, **opts):
690 opts = _byteskwargs(opts)
690 opts = _byteskwargs(opts)
691 timer, fm = gettimer(ui, opts)
691 timer, fm = gettimer(ui, opts)
692 heads = repo.changelog.headrevs()
692 heads = repo.changelog.headrevs()
693 def d():
693 def d():
694 for a in repo.changelog.ancestors(heads):
694 for a in repo.changelog.ancestors(heads):
695 pass
695 pass
696 timer(d)
696 timer(d)
697 fm.end()
697 fm.end()
698
698
699 @command(b'perfancestorset', formatteropts)
699 @command(b'perfancestorset', formatteropts)
700 def perfancestorset(ui, repo, revset, **opts):
700 def perfancestorset(ui, repo, revset, **opts):
701 opts = _byteskwargs(opts)
701 opts = _byteskwargs(opts)
702 timer, fm = gettimer(ui, opts)
702 timer, fm = gettimer(ui, opts)
703 revs = repo.revs(revset)
703 revs = repo.revs(revset)
704 heads = repo.changelog.headrevs()
704 heads = repo.changelog.headrevs()
705 def d():
705 def d():
706 s = repo.changelog.ancestors(heads)
706 s = repo.changelog.ancestors(heads)
707 for rev in revs:
707 for rev in revs:
708 rev in s
708 rev in s
709 timer(d)
709 timer(d)
710 fm.end()
710 fm.end()
711
711
712 @command(b'perfdiscovery', formatteropts, b'PATH')
712 @command(b'perfdiscovery', formatteropts, b'PATH')
713 def perfdiscovery(ui, repo, path, **opts):
713 def perfdiscovery(ui, repo, path, **opts):
714 """benchmark discovery between local repo and the peer at given path
714 """benchmark discovery between local repo and the peer at given path
715 """
715 """
716 repos = [repo, None]
716 repos = [repo, None]
717 timer, fm = gettimer(ui, opts)
717 timer, fm = gettimer(ui, opts)
718 path = ui.expandpath(path)
718 path = ui.expandpath(path)
719
719
720 def s():
720 def s():
721 repos[1] = hg.peer(ui, opts, path)
721 repos[1] = hg.peer(ui, opts, path)
722 def d():
722 def d():
723 setdiscovery.findcommonheads(ui, *repos)
723 setdiscovery.findcommonheads(ui, *repos)
724 timer(d, setup=s)
724 timer(d, setup=s)
725 fm.end()
725 fm.end()
726
726
727 @command(b'perfbookmarks', formatteropts +
727 @command(b'perfbookmarks', formatteropts +
728 [
728 [
729 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
729 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
730 ])
730 ])
731 def perfbookmarks(ui, repo, **opts):
731 def perfbookmarks(ui, repo, **opts):
732 """benchmark parsing bookmarks from disk to memory"""
732 """benchmark parsing bookmarks from disk to memory"""
733 opts = _byteskwargs(opts)
733 opts = _byteskwargs(opts)
734 timer, fm = gettimer(ui, opts)
734 timer, fm = gettimer(ui, opts)
735
735
736 clearrevlogs = opts[b'clear_revlogs']
736 clearrevlogs = opts[b'clear_revlogs']
737 def s():
737 def s():
738 if clearrevlogs:
738 if clearrevlogs:
739 clearchangelog(repo)
739 clearchangelog(repo)
740 clearfilecache(repo, b'_bookmarks')
740 clearfilecache(repo, b'_bookmarks')
741 def d():
741 def d():
742 repo._bookmarks
742 repo._bookmarks
743 timer(d, setup=s)
743 timer(d, setup=s)
744 fm.end()
744 fm.end()
745
745
746 @command(b'perfbundleread', formatteropts, b'BUNDLE')
746 @command(b'perfbundleread', formatteropts, b'BUNDLE')
747 def perfbundleread(ui, repo, bundlepath, **opts):
747 def perfbundleread(ui, repo, bundlepath, **opts):
748 """Benchmark reading of bundle files.
748 """Benchmark reading of bundle files.
749
749
750 This command is meant to isolate the I/O part of bundle reading as
750 This command is meant to isolate the I/O part of bundle reading as
751 much as possible.
751 much as possible.
752 """
752 """
753 from mercurial import (
753 from mercurial import (
754 bundle2,
754 bundle2,
755 exchange,
755 exchange,
756 streamclone,
756 streamclone,
757 )
757 )
758
758
759 opts = _byteskwargs(opts)
759 opts = _byteskwargs(opts)
760
760
761 def makebench(fn):
761 def makebench(fn):
762 def run():
762 def run():
763 with open(bundlepath, b'rb') as fh:
763 with open(bundlepath, b'rb') as fh:
764 bundle = exchange.readbundle(ui, fh, bundlepath)
764 bundle = exchange.readbundle(ui, fh, bundlepath)
765 fn(bundle)
765 fn(bundle)
766
766
767 return run
767 return run
768
768
769 def makereadnbytes(size):
769 def makereadnbytes(size):
770 def run():
770 def run():
771 with open(bundlepath, b'rb') as fh:
771 with open(bundlepath, b'rb') as fh:
772 bundle = exchange.readbundle(ui, fh, bundlepath)
772 bundle = exchange.readbundle(ui, fh, bundlepath)
773 while bundle.read(size):
773 while bundle.read(size):
774 pass
774 pass
775
775
776 return run
776 return run
777
777
778 def makestdioread(size):
778 def makestdioread(size):
779 def run():
779 def run():
780 with open(bundlepath, b'rb') as fh:
780 with open(bundlepath, b'rb') as fh:
781 while fh.read(size):
781 while fh.read(size):
782 pass
782 pass
783
783
784 return run
784 return run
785
785
786 # bundle1
786 # bundle1
787
787
788 def deltaiter(bundle):
788 def deltaiter(bundle):
789 for delta in bundle.deltaiter():
789 for delta in bundle.deltaiter():
790 pass
790 pass
791
791
792 def iterchunks(bundle):
792 def iterchunks(bundle):
793 for chunk in bundle.getchunks():
793 for chunk in bundle.getchunks():
794 pass
794 pass
795
795
796 # bundle2
796 # bundle2
797
797
798 def forwardchunks(bundle):
798 def forwardchunks(bundle):
799 for chunk in bundle._forwardchunks():
799 for chunk in bundle._forwardchunks():
800 pass
800 pass
801
801
802 def iterparts(bundle):
802 def iterparts(bundle):
803 for part in bundle.iterparts():
803 for part in bundle.iterparts():
804 pass
804 pass
805
805
806 def iterpartsseekable(bundle):
806 def iterpartsseekable(bundle):
807 for part in bundle.iterparts(seekable=True):
807 for part in bundle.iterparts(seekable=True):
808 pass
808 pass
809
809
810 def seek(bundle):
810 def seek(bundle):
811 for part in bundle.iterparts(seekable=True):
811 for part in bundle.iterparts(seekable=True):
812 part.seek(0, os.SEEK_END)
812 part.seek(0, os.SEEK_END)
813
813
814 def makepartreadnbytes(size):
814 def makepartreadnbytes(size):
815 def run():
815 def run():
816 with open(bundlepath, b'rb') as fh:
816 with open(bundlepath, b'rb') as fh:
817 bundle = exchange.readbundle(ui, fh, bundlepath)
817 bundle = exchange.readbundle(ui, fh, bundlepath)
818 for part in bundle.iterparts():
818 for part in bundle.iterparts():
819 while part.read(size):
819 while part.read(size):
820 pass
820 pass
821
821
822 return run
822 return run
823
823
824 benches = [
824 benches = [
825 (makestdioread(8192), b'read(8k)'),
825 (makestdioread(8192), b'read(8k)'),
826 (makestdioread(16384), b'read(16k)'),
826 (makestdioread(16384), b'read(16k)'),
827 (makestdioread(32768), b'read(32k)'),
827 (makestdioread(32768), b'read(32k)'),
828 (makestdioread(131072), b'read(128k)'),
828 (makestdioread(131072), b'read(128k)'),
829 ]
829 ]
830
830
831 with open(bundlepath, b'rb') as fh:
831 with open(bundlepath, b'rb') as fh:
832 bundle = exchange.readbundle(ui, fh, bundlepath)
832 bundle = exchange.readbundle(ui, fh, bundlepath)
833
833
834 if isinstance(bundle, changegroup.cg1unpacker):
834 if isinstance(bundle, changegroup.cg1unpacker):
835 benches.extend([
835 benches.extend([
836 (makebench(deltaiter), b'cg1 deltaiter()'),
836 (makebench(deltaiter), b'cg1 deltaiter()'),
837 (makebench(iterchunks), b'cg1 getchunks()'),
837 (makebench(iterchunks), b'cg1 getchunks()'),
838 (makereadnbytes(8192), b'cg1 read(8k)'),
838 (makereadnbytes(8192), b'cg1 read(8k)'),
839 (makereadnbytes(16384), b'cg1 read(16k)'),
839 (makereadnbytes(16384), b'cg1 read(16k)'),
840 (makereadnbytes(32768), b'cg1 read(32k)'),
840 (makereadnbytes(32768), b'cg1 read(32k)'),
841 (makereadnbytes(131072), b'cg1 read(128k)'),
841 (makereadnbytes(131072), b'cg1 read(128k)'),
842 ])
842 ])
843 elif isinstance(bundle, bundle2.unbundle20):
843 elif isinstance(bundle, bundle2.unbundle20):
844 benches.extend([
844 benches.extend([
845 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
845 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
846 (makebench(iterparts), b'bundle2 iterparts()'),
846 (makebench(iterparts), b'bundle2 iterparts()'),
847 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
847 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
848 (makebench(seek), b'bundle2 part seek()'),
848 (makebench(seek), b'bundle2 part seek()'),
849 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
849 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
850 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
850 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
851 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
851 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
852 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
852 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
853 ])
853 ])
854 elif isinstance(bundle, streamclone.streamcloneapplier):
854 elif isinstance(bundle, streamclone.streamcloneapplier):
855 raise error.Abort(b'stream clone bundles not supported')
855 raise error.Abort(b'stream clone bundles not supported')
856 else:
856 else:
857 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
857 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
858
858
859 for fn, title in benches:
859 for fn, title in benches:
860 timer, fm = gettimer(ui, opts)
860 timer, fm = gettimer(ui, opts)
861 timer(fn, title=title)
861 timer(fn, title=title)
862 fm.end()
862 fm.end()
863
863
864 @command(b'perfchangegroupchangelog', formatteropts +
864 @command(b'perfchangegroupchangelog', formatteropts +
865 [(b'', b'cgversion', b'02', b'changegroup version'),
865 [(b'', b'cgversion', b'02', b'changegroup version'),
866 (b'r', b'rev', b'', b'revisions to add to changegroup')])
866 (b'r', b'rev', b'', b'revisions to add to changegroup')])
867 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
867 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
868 """Benchmark producing a changelog group for a changegroup.
868 """Benchmark producing a changelog group for a changegroup.
869
869
870 This measures the time spent processing the changelog during a
870 This measures the time spent processing the changelog during a
871 bundle operation. This occurs during `hg bundle` and on a server
871 bundle operation. This occurs during `hg bundle` and on a server
872 processing a `getbundle` wire protocol request (handles clones
872 processing a `getbundle` wire protocol request (handles clones
873 and pull requests).
873 and pull requests).
874
874
875 By default, all revisions are added to the changegroup.
875 By default, all revisions are added to the changegroup.
876 """
876 """
877 opts = _byteskwargs(opts)
877 opts = _byteskwargs(opts)
878 cl = repo.changelog
878 cl = repo.changelog
879 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
879 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
880 bundler = changegroup.getbundler(cgversion, repo)
880 bundler = changegroup.getbundler(cgversion, repo)
881
881
882 def d():
882 def d():
883 state, chunks = bundler._generatechangelog(cl, nodes)
883 state, chunks = bundler._generatechangelog(cl, nodes)
884 for chunk in chunks:
884 for chunk in chunks:
885 pass
885 pass
886
886
887 timer, fm = gettimer(ui, opts)
887 timer, fm = gettimer(ui, opts)
888
888
889 # Terminal printing can interfere with timing. So disable it.
889 # Terminal printing can interfere with timing. So disable it.
890 with ui.configoverride({(b'progress', b'disable'): True}):
890 with ui.configoverride({(b'progress', b'disable'): True}):
891 timer(d)
891 timer(d)
892
892
893 fm.end()
893 fm.end()
894
894
895 @command(b'perfdirs', formatteropts)
895 @command(b'perfdirs', formatteropts)
896 def perfdirs(ui, repo, **opts):
896 def perfdirs(ui, repo, **opts):
897 opts = _byteskwargs(opts)
897 opts = _byteskwargs(opts)
898 timer, fm = gettimer(ui, opts)
898 timer, fm = gettimer(ui, opts)
899 dirstate = repo.dirstate
899 dirstate = repo.dirstate
900 b'a' in dirstate
900 b'a' in dirstate
901 def d():
901 def d():
902 dirstate.hasdir(b'a')
902 dirstate.hasdir(b'a')
903 del dirstate._map._dirs
903 del dirstate._map._dirs
904 timer(d)
904 timer(d)
905 fm.end()
905 fm.end()
906
906
907 @command(b'perfdirstate', formatteropts)
907 @command(b'perfdirstate', formatteropts)
908 def perfdirstate(ui, repo, **opts):
908 def perfdirstate(ui, repo, **opts):
909 opts = _byteskwargs(opts)
909 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
910 timer, fm = gettimer(ui, opts)
911 b"a" in repo.dirstate
911 b"a" in repo.dirstate
912 def d():
912 def d():
913 repo.dirstate.invalidate()
913 repo.dirstate.invalidate()
914 b"a" in repo.dirstate
914 b"a" in repo.dirstate
915 timer(d)
915 timer(d)
916 fm.end()
916 fm.end()
917
917
918 @command(b'perfdirstatedirs', formatteropts)
918 @command(b'perfdirstatedirs', formatteropts)
919 def perfdirstatedirs(ui, repo, **opts):
919 def perfdirstatedirs(ui, repo, **opts):
920 opts = _byteskwargs(opts)
920 opts = _byteskwargs(opts)
921 timer, fm = gettimer(ui, opts)
921 timer, fm = gettimer(ui, opts)
922 b"a" in repo.dirstate
922 b"a" in repo.dirstate
923 def d():
923 def d():
924 repo.dirstate.hasdir(b"a")
924 repo.dirstate.hasdir(b"a")
925 del repo.dirstate._map._dirs
925 del repo.dirstate._map._dirs
926 timer(d)
926 timer(d)
927 fm.end()
927 fm.end()
928
928
929 @command(b'perfdirstatefoldmap', formatteropts)
929 @command(b'perfdirstatefoldmap', formatteropts)
930 def perfdirstatefoldmap(ui, repo, **opts):
930 def perfdirstatefoldmap(ui, repo, **opts):
931 opts = _byteskwargs(opts)
931 opts = _byteskwargs(opts)
932 timer, fm = gettimer(ui, opts)
932 timer, fm = gettimer(ui, opts)
933 dirstate = repo.dirstate
933 dirstate = repo.dirstate
934 b'a' in dirstate
934 b'a' in dirstate
935 def d():
935 def d():
936 dirstate._map.filefoldmap.get(b'a')
936 dirstate._map.filefoldmap.get(b'a')
937 del dirstate._map.filefoldmap
937 del dirstate._map.filefoldmap
938 timer(d)
938 timer(d)
939 fm.end()
939 fm.end()
940
940
941 @command(b'perfdirfoldmap', formatteropts)
941 @command(b'perfdirfoldmap', formatteropts)
942 def perfdirfoldmap(ui, repo, **opts):
942 def perfdirfoldmap(ui, repo, **opts):
943 opts = _byteskwargs(opts)
943 opts = _byteskwargs(opts)
944 timer, fm = gettimer(ui, opts)
944 timer, fm = gettimer(ui, opts)
945 dirstate = repo.dirstate
945 dirstate = repo.dirstate
946 b'a' in dirstate
946 b'a' in dirstate
947 def d():
947 def d():
948 dirstate._map.dirfoldmap.get(b'a')
948 dirstate._map.dirfoldmap.get(b'a')
949 del dirstate._map.dirfoldmap
949 del dirstate._map.dirfoldmap
950 del dirstate._map._dirs
950 del dirstate._map._dirs
951 timer(d)
951 timer(d)
952 fm.end()
952 fm.end()
953
953
954 @command(b'perfdirstatewrite', formatteropts)
954 @command(b'perfdirstatewrite', formatteropts)
955 def perfdirstatewrite(ui, repo, **opts):
955 def perfdirstatewrite(ui, repo, **opts):
956 opts = _byteskwargs(opts)
956 opts = _byteskwargs(opts)
957 timer, fm = gettimer(ui, opts)
957 timer, fm = gettimer(ui, opts)
958 ds = repo.dirstate
958 ds = repo.dirstate
959 b"a" in ds
959 b"a" in ds
960 def d():
960 def d():
961 ds._dirty = True
961 ds._dirty = True
962 ds.write(repo.currenttransaction())
962 ds.write(repo.currenttransaction())
963 timer(d)
963 timer(d)
964 fm.end()
964 fm.end()
965
965
966 @command(b'perfmergecalculate',
966 def _getmergerevs(repo, opts):
967 [
967 """parse command argument to return rev involved in merge
968 (b'r', b'rev', b'.', b'rev to merge against'),
968
969 (b'', b'from', b'', b'rev to merge from'),
969 input: options dictionnary with `rev`, `from` and `bse`
970 (b'', b'base', b'', b'the revision to use as base'),
970 output: (localctx, otherctx, basectx)
971 ] + formatteropts)
971 """
972 def perfmergecalculate(ui, repo, rev, **opts):
973 opts = _byteskwargs(opts)
974 timer, fm = gettimer(ui, opts)
975
976 if opts['from']:
972 if opts['from']:
977 fromrev = scmutil.revsingle(repo, opts['from'])
973 fromrev = scmutil.revsingle(repo, opts['from'])
978 wctx = repo[fromrev]
974 wctx = repo[fromrev]
979 else:
975 else:
980 wctx = repo[None]
976 wctx = repo[None]
981 # we don't want working dir files to be stat'd in the benchmark, so
977 # we don't want working dir files to be stat'd in the benchmark, so
982 # prime that cache
978 # prime that cache
983 wctx.dirty()
979 wctx.dirty()
984 rctx = scmutil.revsingle(repo, rev, rev)
980 rctx = scmutil.revsingle(repo, opts['rev'], opts['rev'])
985 if opts['base']:
981 if opts['base']:
986 fromrev = scmutil.revsingle(repo, opts['base'])
982 fromrev = scmutil.revsingle(repo, opts['base'])
987 ancestor = repo[fromrev]
983 ancestor = repo[fromrev]
988 else:
984 else:
989 ancestor = wctx.ancestor(rctx)
985 ancestor = wctx.ancestor(rctx)
990 def d():
986 return (wctx, rctx, ancestor)
991 # acceptremote is True because we don't want prompts in the middle of
987
992 # our benchmark
988 @command(b'perfmergecalculate',
993 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
989 [
994 acceptremote=True, followcopies=True)
990 (b'r', b'rev', b'.', b'rev to merge against'),
995 timer(d)
991 (b'', b'from', b'', b'rev to merge from'),
996 fm.end()
992 (b'', b'base', b'', b'the revision to use as base'),
993 ] + formatteropts)
994 def perfmergecalculate(ui, repo, **opts):
995 opts = _byteskwargs(opts)
996 timer, fm = gettimer(ui, opts)
997
998 wctx, rctx, ancestor = _getmergerevs(repo, opts)
997 def d():
999 def d():
998 # acceptremote is True because we don't want prompts in the middle of
1000 # acceptremote is True because we don't want prompts in the middle of
999 # our benchmark
1001 # our benchmark
1000 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1002 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1001 acceptremote=True, followcopies=True)
1003 acceptremote=True, followcopies=True)
1002 timer(d)
1004 timer(d)
1003 fm.end()
1005 fm.end()
1004
1006
1005 @command(b'perfpathcopies', [], b"REV REV")
1007 @command(b'perfpathcopies', [], b"REV REV")
1006 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1008 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1007 """benchmark the copy tracing logic"""
1009 """benchmark the copy tracing logic"""
1008 opts = _byteskwargs(opts)
1010 opts = _byteskwargs(opts)
1009 timer, fm = gettimer(ui, opts)
1011 timer, fm = gettimer(ui, opts)
1010 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1012 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1011 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1013 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1012 def d():
1014 def d():
1013 copies.pathcopies(ctx1, ctx2)
1015 copies.pathcopies(ctx1, ctx2)
1014 timer(d)
1016 timer(d)
1015 fm.end()
1017 fm.end()
1016
1018
1017 @command(b'perfphases',
1019 @command(b'perfphases',
1018 [(b'', b'full', False, b'include file reading time too'),
1020 [(b'', b'full', False, b'include file reading time too'),
1019 ], b"")
1021 ], b"")
1020 def perfphases(ui, repo, **opts):
1022 def perfphases(ui, repo, **opts):
1021 """benchmark phasesets computation"""
1023 """benchmark phasesets computation"""
1022 opts = _byteskwargs(opts)
1024 opts = _byteskwargs(opts)
1023 timer, fm = gettimer(ui, opts)
1025 timer, fm = gettimer(ui, opts)
1024 _phases = repo._phasecache
1026 _phases = repo._phasecache
1025 full = opts.get(b'full')
1027 full = opts.get(b'full')
1026 def d():
1028 def d():
1027 phases = _phases
1029 phases = _phases
1028 if full:
1030 if full:
1029 clearfilecache(repo, b'_phasecache')
1031 clearfilecache(repo, b'_phasecache')
1030 phases = repo._phasecache
1032 phases = repo._phasecache
1031 phases.invalidate()
1033 phases.invalidate()
1032 phases.loadphaserevs(repo)
1034 phases.loadphaserevs(repo)
1033 timer(d)
1035 timer(d)
1034 fm.end()
1036 fm.end()
1035
1037
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server

    DEST defaults to the configured `default-push`/`default` path. Some
    statistics about the remote phase roots are printed before the timed
    `remotephasessummary` call.
    """
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    # the peer is no longer needed once the listkeys data is fetched
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # NOTE(review): iteritems() is Python 2 only — verify this code path is
    # exercised on Python 3 (remotephases is a plain dict here).
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1091
1093
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; resolve it and take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex: treat it directly as a manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # NOTE(review): a bytes attribute name passed to
                # safehasattr/getattr may not behave on Python 3 — verify
                # whether this should be the native string 'getstorage'.
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial: go through the private revlog
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1127
1129
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changelog entry for the given revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the node once, outside the timed closure
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1138
1140
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # invalidate the dirstate and drop the cached ignore matcher so
        # every run measures a cold load
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # attribute access triggers the (filecache-backed) ignore parsing
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1155
1157
@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not revision lookup post creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs converts every key to bytes, so the option must
        # be read as opts[b'rev'] — the former opts['rev'] raised KeyError on
        # Python 3; the Abort message is likewise a bytes literal for
        # consistency with the rest of the file.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1209
1211
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: _byteskwargs converts every key to bytes, so the option must be
    # read with a bytes key — opts['clear_caches'] raised KeyError on
    # Python 3.
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # bytes message for consistency with the other aborts in this file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1268
1270
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark a full interpreter startup via `hg version -q`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # blank HGRCPATH so user configuration does not skew the run
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows: no inline env assignment in os.system command lines
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1282
1284
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the nodes up front so only the parents() calls are timed
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1306
1308
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark fetching a changeset's file list through the context API"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1316
1318
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading a changeset's file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # field 3 of the parsed entry is taken as the file list here
        # (cf. perfctxfiles, which goes through the context API instead)
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1327
1329
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision symbol to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # len() forces the lookup result to be consumed inside the timed lambda
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1334
1336
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a pseudo-random sequence of linelog edits

    The edit list is generated once, outside the timed closure, from a
    fixed random seed so every run replays the exact same sequence.
    """
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the benchmark is fully deterministic
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # (a1, a2) is a hunk within the current file, (b1, b2) its
        # replacement; track the running line count so hunks stay in range
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1368
1370
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function to a local so attribute lookup is out of the timing
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1376
1378
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node-to-rev lookup on a standalone changelog revlog

    Caches are cleared after each lookup so every run starts cold.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # instantiate the revlog directly, bypassing the repo's cached changelog
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()
1390
1392
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (optionally following renames)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer the ui so log output does not pollute the benchmark output
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1404
1406
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        # walk from tip down to revision 0
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1419
1421
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a ui writing to os.devnull so terminal output does not
    # skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1453
1455
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # revisions of interest: only merges exercise copy tracing between two
    # branches, so restrict the scan to them
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                # NOTE(review): `data` mixes bytes keys (b'source', ...) with
                # native-str keys ('time', ...); both the fm.data(**data)
                # expansion and the `output % out` substitution below need
                # native-str keys on Python 3 — verify on a py3 run.
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1529
1531
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1536
1538
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1546
1548
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache back out inside a transaction

    The repository lock is held and the fncache backed up for the duration
    of the benchmark; the transaction is closed afterwards.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # mark the cache dirty so write() actually rewrites the file
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1563
1565
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark encoding every path currently stored in the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
1575
1577
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop: diff (left, right) text pairs pulled from *q*.

    A ``None`` item marks the end of a batch; the worker then parks on
    *ready* until the coordinator wakes it for the next run.  The loop
    exits once *done* is set.
    """
    # The flags never change for the lifetime of the worker, so pick the
    # diff routine once instead of re-testing inside the hot loop.
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff

    while not done.is_set():
        pair = q.get()
        while pair is not None:
            diff(*pair)
            q.task_done()
            pair = q.get()
        # acknowledge the terminating None as well
        q.task_done()
        with ready:
            ready.wait()
1591
1593
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node *mnode*.

    Uses the modern ``getstorage`` API when available, falling back to
    the legacy ``_revlog`` attribute on older manifestlog objects.
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1601
1603
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    # --alldata implies working on the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Gather all the (old, new) text pairs up front so only the diffing
    # itself is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def runbench():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # Prime the queue with one None per worker and wait for the workers
        # to drain them before starting the timed section.
        q = queue()
        for _i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for _i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()

        def runbench():
            for pair in textpairs:
                q.put(pair)
            for _i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(runbench)
    fm.end()

    if withthreads:
        # shut the workers down cleanly
        done.set()
        for _i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1702
1704
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    # --alldata implies working on the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Collect the (old, new) text pairs up front so only diffing is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def rundiffs():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(rundiffs)
    fm.end()
1768
1770
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Time the diff with every interesting whitespace-option combination.
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {options[c]: b'1' for c in flags}

        def rundiff(diffargs=diffargs):
            # buffer the output so terminal I/O stays out of the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        label = flags.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + label) if label else b'none')
        timer(rundiff, title=title)
    fm.end()
1790
1792
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # only revlog format version 1 is supported here
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed points along the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (benchmark callable, display title) pairs
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1908
1910
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative --startrev counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def readrevs():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(readrevs)
    fm.end()
1950
1952
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # BUGFIX: the help text was a copy/paste of --stoprev's
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # BUGFIX: message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUGFIX: the 50th percentile was computed as `* 70 // 100`
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2058
2060
2059 class _faketr(object):
2061 class _faketr(object):
2060 def add(s, x, y, z=None):
2062 def add(s, x, y, z=None):
2061 return None
2063 return None
2062
2064
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Time re-adding revisions [startrev, stoprev] of revlog ``orig``.

    The revisions are written into a temporary copy of ``orig`` truncated at
    ``startrev`` (see ``_temprevlog``), so each ``addrawrevision`` call is
    exercised for real.  ``source`` selects how the revision content is
    seeded (full text or a cached delta; see ``_getrevisionseed``).

    Returns a list of ``(rev, timing)`` pairs, one per re-added revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # Drop both the C index caches and the revlog-level caches so
                # every addition pays the full cost.
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] is the timing tuple captured by the timeone() context.
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2099
2101
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for re-adding ``rev`` of ``orig``.

    ``source`` selects how the revision content is provided to
    ``addrawrevision``: as a full text (``b'full'``) or as a cached delta
    against first parent, second parent, the smaller of the two parent
    deltas, or the delta base currently used in storage.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to p1 when there is no second parent.
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        base, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # Prefer p2 only when its delta is strictly smaller (ties keep p1).
            if len(diff) > len(p2diff):
                base, diff = p2, p2diff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2138
2140
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary copy of revlog ``orig``
    truncated at revision ``truncaterev``.

    The index and data files are copied into a temporary directory, cut back
    so that revisions >= ``truncaterev`` are absent, and a fresh revlog is
    instantiated on top of the copies.  The temporary directory is removed on
    exit.  Inline revlogs are not supported (index and data share one file,
    so the simple two-file truncation below would not work).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries have a fixed size, so the cut point is a simple
            # multiple of the per-entry size
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        # propagate the original opener options (e.g. format flags) so the
        # copy behaves like the original; may be None on old versions
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True)
        shutil.rmtree(tmpdir, True)
2185
2187
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: benchmark every available engine that
        # actually implements a revlog compressor.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # File handle on the file holding the chunk data; for inline revlogs
        # the data lives in the index file.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, no shared file handle
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # one big segment read covering all revisions at once
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # per-revision chunk extraction (read + slice, no decompression of
        # anything beyond the individual chunk)
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # Recompress the chunks captured by dochunkbatch with the given
        # engine; chunks[0] is therefore expected to be populated first
        # (dochunkbatch runs earlier in the bench list).
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2303
2305
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional argument is the revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each revision's compressed chunk out of the pre-read
        # segments without copying (util.buffer gives a zero-copy view).
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved into revlogutils.deltas; fall back for old versions.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # Pre-compute the intermediate products once so each benchmark below
    # exercises only its own phase.
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2439
2441
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. The volatile caches hold
    filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop the volatile caches so each run rebuilds them
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx per revision instead of bare rev numbers
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2462
2464
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence.
    With positional ``names``, only the matching obsolescence sets and
    repoview filters are benchmarked; otherwise all of them are."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # always measure on the unfiltered repo so filtering itself is timed
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark function for one obsolescence-related set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark function for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2504
2506
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the whole chain is rebuilt
                view._branchcaches.clear()
            else:
                # drop only this filter's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself still pending, so the
        # resulting order goes from smaller subsets to bigger ones
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap read/write so only the in-memory update is
    # measured; restored in the finally block below.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2583
2585
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions the benchmarked update() call will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything outside the --base set
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything outside the --target set
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary filters so we can build repoviews matching the
        # requested base and target subsets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found, build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always drop the temporary filters, even on error
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2687
2689
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only list the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the nearest subset that does have a cache on disk
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
2738
2740
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # instantiating obsstore parses every marker from the store vfs
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()
2748
2750
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """benchmark `util.lrucachedict` operations (init/get/set/mixed)"""
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entries may have been evicted to satisfy the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # with a cost limit, run the cost-aware variants; otherwise run the
    # plain ones
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2879
2881
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        # issue a large, fixed number of writes so per-call overhead dominates
        for i in range(100000):
            ui.write(b'Testing write performance\n')
    timer(write)
    fm.end()
2892
2894
def uisetup(ui):
    """extension setup hook

    For "historical portability" the perf extension must run against very
    old Mercurial versions; this hook patches `cmdutil.openrevlog` on
    versions where the '--dir' option would silently misbehave.
    """
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2907
2909
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive the progress bar through `total` single-step increments
        with ui.makeprogress(topic, total=total) as progress:
            for i in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now