perf: make perf.run-limits code work with Python 3...
Gregory Szorc
r42230:912d82da default
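The hunk below updates the run-limits parsing in gettimer() for Python 3: the option value (for example "run-limits = 0.5-10, 5.0-3" in the [perf] section) arrives as bytes, so the separator passed to split() becomes b'-', the parts are run through pycompat.sysstr() before float()/int(), and exceptions are converted with pycompat.bytestr() before being interpolated into the bytes warning message. Below is a rough standalone sketch of that parsing logic, for illustration only; the function name and the plain print()/decode() calls are this sketch's own, not perf.py's, which uses ui.warn() and the pycompat helpers.

# Illustrative stand-in for the run-limits parsing done in gettimer().
def parse_run_limits(limitspec, default=((3.0, 100), (10.0, 3))):
    """Parse b'<time>-<numberofrun>' items into (float, int) pairs."""
    limits = []
    for item in limitspec:
        # The separator must be bytes: splitting a bytes config value
        # with a str '-' raises TypeError on Python 3.
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            print('malformed run limit entry, missing "-": %r' % item)
            continue
        try:
            # perf.py uses pycompat.sysstr() here; a plain decode is
            # enough for this standalone sketch.
            time_limit = float(parts[0].decode('ascii'))
            run_limit = int(parts[1].decode('ascii'))
        except ValueError as e:
            print('malformed run limit entry, %s: %r' % (e, item))
            continue
        limits.append((time_limit, run_limit))
    return tuple(limits) or default

print(parse_run_limits([b'0.5-10', b'5.0-3']))  # ((0.5, 10), (5.0, 3))
print(parse_run_limits([b'bogus']))             # warns, falls back to the defaults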
@@ -1,2858 +1,2858 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``run-limits``
18 ``run-limits``
19 Control the number of runs each benchmark will perform. The option value
19 Control the number of runs each benchmark will perform. The option value
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
21 conditions are considered in order with the following logic:
21 conditions are considered in order with the following logic:
22
22
23 If benchmark has been running for <time> seconds, and we have performed
23 If benchmark has been running for <time> seconds, and we have performed
24 <numberofrun> iterations, stop the benchmark,
24 <numberofrun> iterations, stop the benchmark,
25
25
26 The default value is: `3.0-100, 10.0-3`
26 The default value is: `3.0-100, 10.0-3`
27
27
28 ``stub``
28 ``stub``
29 When set, benchmarks will only be run once, useful for testing
29 When set, benchmarks will only be run once, useful for testing
30 (default: off)
30 (default: off)
31 '''
31 '''
32
32
33 # "historical portability" policy of perf.py:
33 # "historical portability" policy of perf.py:
34 #
34 #
35 # We have to do:
35 # We have to do:
36 # - make perf.py "loadable" with as wide Mercurial version as possible
36 # - make perf.py "loadable" with as wide Mercurial version as possible
37 # This doesn't mean that perf commands work correctly with that Mercurial.
37 # This doesn't mean that perf commands work correctly with that Mercurial.
38 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
38 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
39 # - make historical perf command work correctly with as wide Mercurial
39 # - make historical perf command work correctly with as wide Mercurial
40 # version as possible
40 # version as possible
41 #
41 #
42 # We have to do, if possible with reasonable cost:
42 # We have to do, if possible with reasonable cost:
43 # - make recent perf command for historical feature work correctly
43 # - make recent perf command for historical feature work correctly
44 # with early Mercurial
44 # with early Mercurial
45 #
45 #
46 # We don't have to do:
46 # We don't have to do:
47 # - make perf command for recent feature work correctly with early
47 # - make perf command for recent feature work correctly with early
48 # Mercurial
48 # Mercurial
49
49
50 from __future__ import absolute_import
50 from __future__ import absolute_import
51 import contextlib
51 import contextlib
52 import functools
52 import functools
53 import gc
53 import gc
54 import os
54 import os
55 import random
55 import random
56 import shutil
56 import shutil
57 import struct
57 import struct
58 import sys
58 import sys
59 import tempfile
59 import tempfile
60 import threading
60 import threading
61 import time
61 import time
62 from mercurial import (
62 from mercurial import (
63 changegroup,
63 changegroup,
64 cmdutil,
64 cmdutil,
65 commands,
65 commands,
66 copies,
66 copies,
67 error,
67 error,
68 extensions,
68 extensions,
69 hg,
69 hg,
70 mdiff,
70 mdiff,
71 merge,
71 merge,
72 revlog,
72 revlog,
73 util,
73 util,
74 )
74 )
75
75
76 # for "historical portability":
76 # for "historical portability":
77 # try to import modules separately (in dict order), and ignore
77 # try to import modules separately (in dict order), and ignore
78 # failure, because these aren't available with early Mercurial
78 # failure, because these aren't available with early Mercurial
79 try:
79 try:
80 from mercurial import branchmap # since 2.5 (or bcee63733aad)
80 from mercurial import branchmap # since 2.5 (or bcee63733aad)
81 except ImportError:
81 except ImportError:
82 pass
82 pass
83 try:
83 try:
84 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
84 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
85 except ImportError:
85 except ImportError:
86 pass
86 pass
87 try:
87 try:
88 from mercurial import registrar # since 3.7 (or 37d50250b696)
88 from mercurial import registrar # since 3.7 (or 37d50250b696)
89 dir(registrar) # forcibly load it
89 dir(registrar) # forcibly load it
90 except ImportError:
90 except ImportError:
91 registrar = None
91 registrar = None
92 try:
92 try:
93 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
93 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
94 except ImportError:
94 except ImportError:
95 pass
95 pass
96 try:
96 try:
97 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
97 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
98 except ImportError:
98 except ImportError:
99 pass
99 pass
100 try:
100 try:
101 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
101 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104
104
105
105
106 def identity(a):
106 def identity(a):
107 return a
107 return a
108
108
109 try:
109 try:
110 from mercurial import pycompat
110 from mercurial import pycompat
111 getargspec = pycompat.getargspec # added to module after 4.5
111 getargspec = pycompat.getargspec # added to module after 4.5
112 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
112 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
113 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
113 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
114 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
114 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
115 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
115 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
116 if pycompat.ispy3:
116 if pycompat.ispy3:
117 _maxint = sys.maxsize # per py3 docs for replacing maxint
117 _maxint = sys.maxsize # per py3 docs for replacing maxint
118 else:
118 else:
119 _maxint = sys.maxint
119 _maxint = sys.maxint
120 except (ImportError, AttributeError):
120 except (ImportError, AttributeError):
121 import inspect
121 import inspect
122 getargspec = inspect.getargspec
122 getargspec = inspect.getargspec
123 _byteskwargs = identity
123 _byteskwargs = identity
124 fsencode = identity # no py3 support
124 fsencode = identity # no py3 support
125 _maxint = sys.maxint # no py3 support
125 _maxint = sys.maxint # no py3 support
126 _sysstr = lambda x: x # no py3 support
126 _sysstr = lambda x: x # no py3 support
127 _xrange = xrange
127 _xrange = xrange
128
128
129 try:
129 try:
130 # 4.7+
130 # 4.7+
131 queue = pycompat.queue.Queue
131 queue = pycompat.queue.Queue
132 except (AttributeError, ImportError):
132 except (AttributeError, ImportError):
133 # <4.7.
133 # <4.7.
134 try:
134 try:
135 queue = pycompat.queue
135 queue = pycompat.queue
136 except (AttributeError, ImportError):
136 except (AttributeError, ImportError):
137 queue = util.queue
137 queue = util.queue
138
138
139 try:
139 try:
140 from mercurial import logcmdutil
140 from mercurial import logcmdutil
141 makelogtemplater = logcmdutil.maketemplater
141 makelogtemplater = logcmdutil.maketemplater
142 except (AttributeError, ImportError):
142 except (AttributeError, ImportError):
143 try:
143 try:
144 makelogtemplater = cmdutil.makelogtemplater
144 makelogtemplater = cmdutil.makelogtemplater
145 except (AttributeError, ImportError):
145 except (AttributeError, ImportError):
146 makelogtemplater = None
146 makelogtemplater = None
147
147
148 # for "historical portability":
148 # for "historical portability":
149 # define util.safehasattr forcibly, because util.safehasattr has been
149 # define util.safehasattr forcibly, because util.safehasattr has been
150 # available since 1.9.3 (or 94b200a11cf7)
150 # available since 1.9.3 (or 94b200a11cf7)
151 _undefined = object()
151 _undefined = object()
152 def safehasattr(thing, attr):
152 def safehasattr(thing, attr):
153 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
153 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
154 setattr(util, 'safehasattr', safehasattr)
154 setattr(util, 'safehasattr', safehasattr)
155
155
156 # for "historical portability":
156 # for "historical portability":
157 # define util.timer forcibly, because util.timer has been available
157 # define util.timer forcibly, because util.timer has been available
158 # since ae5d60bb70c9
158 # since ae5d60bb70c9
159 if safehasattr(time, 'perf_counter'):
159 if safehasattr(time, 'perf_counter'):
160 util.timer = time.perf_counter
160 util.timer = time.perf_counter
161 elif os.name == b'nt':
161 elif os.name == b'nt':
162 util.timer = time.clock
162 util.timer = time.clock
163 else:
163 else:
164 util.timer = time.time
164 util.timer = time.time
165
165
166 # for "historical portability":
166 # for "historical portability":
167 # use locally defined empty option list, if formatteropts isn't
167 # use locally defined empty option list, if formatteropts isn't
168 # available, because commands.formatteropts has been available since
168 # available, because commands.formatteropts has been available since
169 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
169 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
170 # available since 2.2 (or ae5f92e154d3)
170 # available since 2.2 (or ae5f92e154d3)
171 formatteropts = getattr(cmdutil, "formatteropts",
171 formatteropts = getattr(cmdutil, "formatteropts",
172 getattr(commands, "formatteropts", []))
172 getattr(commands, "formatteropts", []))
173
173
174 # for "historical portability":
174 # for "historical portability":
175 # use locally defined option list, if debugrevlogopts isn't available,
175 # use locally defined option list, if debugrevlogopts isn't available,
176 # because commands.debugrevlogopts has been available since 3.7 (or
176 # because commands.debugrevlogopts has been available since 3.7 (or
177 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
177 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
178 # since 1.9 (or a79fea6b3e77).
178 # since 1.9 (or a79fea6b3e77).
179 revlogopts = getattr(cmdutil, "debugrevlogopts",
179 revlogopts = getattr(cmdutil, "debugrevlogopts",
180 getattr(commands, "debugrevlogopts", [
180 getattr(commands, "debugrevlogopts", [
181 (b'c', b'changelog', False, (b'open changelog')),
181 (b'c', b'changelog', False, (b'open changelog')),
182 (b'm', b'manifest', False, (b'open manifest')),
182 (b'm', b'manifest', False, (b'open manifest')),
183 (b'', b'dir', False, (b'open directory manifest')),
183 (b'', b'dir', False, (b'open directory manifest')),
184 ]))
184 ]))
185
185
186 cmdtable = {}
186 cmdtable = {}
187
187
188 # for "historical portability":
188 # for "historical portability":
189 # define parsealiases locally, because cmdutil.parsealiases has been
189 # define parsealiases locally, because cmdutil.parsealiases has been
190 # available since 1.5 (or 6252852b4332)
190 # available since 1.5 (or 6252852b4332)
191 def parsealiases(cmd):
191 def parsealiases(cmd):
192 return cmd.split(b"|")
192 return cmd.split(b"|")
193
193
194 if safehasattr(registrar, 'command'):
194 if safehasattr(registrar, 'command'):
195 command = registrar.command(cmdtable)
195 command = registrar.command(cmdtable)
196 elif safehasattr(cmdutil, 'command'):
196 elif safehasattr(cmdutil, 'command'):
197 command = cmdutil.command(cmdtable)
197 command = cmdutil.command(cmdtable)
198 if b'norepo' not in getargspec(command).args:
198 if b'norepo' not in getargspec(command).args:
199 # for "historical portability":
199 # for "historical portability":
200 # wrap original cmdutil.command, because "norepo" option has
200 # wrap original cmdutil.command, because "norepo" option has
201 # been available since 3.1 (or 75a96326cecb)
201 # been available since 3.1 (or 75a96326cecb)
202 _command = command
202 _command = command
203 def command(name, options=(), synopsis=None, norepo=False):
203 def command(name, options=(), synopsis=None, norepo=False):
204 if norepo:
204 if norepo:
205 commands.norepo += b' %s' % b' '.join(parsealiases(name))
205 commands.norepo += b' %s' % b' '.join(parsealiases(name))
206 return _command(name, list(options), synopsis)
206 return _command(name, list(options), synopsis)
207 else:
207 else:
208 # for "historical portability":
208 # for "historical portability":
209 # define "@command" annotation locally, because cmdutil.command
209 # define "@command" annotation locally, because cmdutil.command
210 # has been available since 1.9 (or 2daa5179e73f)
210 # has been available since 1.9 (or 2daa5179e73f)
211 def command(name, options=(), synopsis=None, norepo=False):
211 def command(name, options=(), synopsis=None, norepo=False):
212 def decorator(func):
212 def decorator(func):
213 if synopsis:
213 if synopsis:
214 cmdtable[name] = func, list(options), synopsis
214 cmdtable[name] = func, list(options), synopsis
215 else:
215 else:
216 cmdtable[name] = func, list(options)
216 cmdtable[name] = func, list(options)
217 if norepo:
217 if norepo:
218 commands.norepo += b' %s' % b' '.join(parsealiases(name))
218 commands.norepo += b' %s' % b' '.join(parsealiases(name))
219 return func
219 return func
220 return decorator
220 return decorator
221
221
222 try:
222 try:
223 import mercurial.registrar
223 import mercurial.registrar
224 import mercurial.configitems
224 import mercurial.configitems
225 configtable = {}
225 configtable = {}
226 configitem = mercurial.registrar.configitem(configtable)
226 configitem = mercurial.registrar.configitem(configtable)
227 configitem(b'perf', b'presleep',
227 configitem(b'perf', b'presleep',
228 default=mercurial.configitems.dynamicdefault,
228 default=mercurial.configitems.dynamicdefault,
229 )
229 )
230 configitem(b'perf', b'stub',
230 configitem(b'perf', b'stub',
231 default=mercurial.configitems.dynamicdefault,
231 default=mercurial.configitems.dynamicdefault,
232 )
232 )
233 configitem(b'perf', b'parentscount',
233 configitem(b'perf', b'parentscount',
234 default=mercurial.configitems.dynamicdefault,
234 default=mercurial.configitems.dynamicdefault,
235 )
235 )
236 configitem(b'perf', b'all-timing',
236 configitem(b'perf', b'all-timing',
237 default=mercurial.configitems.dynamicdefault,
237 default=mercurial.configitems.dynamicdefault,
238 )
238 )
239 configitem(b'perf', b'run-limits',
239 configitem(b'perf', b'run-limits',
240 default=mercurial.configitems.dynamicdefault,
240 default=mercurial.configitems.dynamicdefault,
241 )
241 )
242 except (ImportError, AttributeError):
242 except (ImportError, AttributeError):
243 pass
243 pass
244
244
245 def getlen(ui):
245 def getlen(ui):
246 if ui.configbool(b"perf", b"stub", False):
246 if ui.configbool(b"perf", b"stub", False):
247 return lambda x: 1
247 return lambda x: 1
248 return len
248 return len
249
249
250 def gettimer(ui, opts=None):
250 def gettimer(ui, opts=None):
251 """return a timer function and formatter: (timer, formatter)
251 """return a timer function and formatter: (timer, formatter)
252
252
253 This function exists to gather the creation of formatter in a single
253 This function exists to gather the creation of formatter in a single
254 place instead of duplicating it in all performance commands."""
254 place instead of duplicating it in all performance commands."""
255
255
256 # enforce an idle period before execution to counteract power management
256 # enforce an idle period before execution to counteract power management
257 # experimental config: perf.presleep
257 # experimental config: perf.presleep
258 time.sleep(getint(ui, b"perf", b"presleep", 1))
258 time.sleep(getint(ui, b"perf", b"presleep", 1))
259
259
260 if opts is None:
260 if opts is None:
261 opts = {}
261 opts = {}
262 # redirect all to stderr unless buffer api is in use
262 # redirect all to stderr unless buffer api is in use
263 if not ui._buffers:
263 if not ui._buffers:
264 ui = ui.copy()
264 ui = ui.copy()
265 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
265 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
266 if uifout:
266 if uifout:
267 # for "historical portability":
267 # for "historical portability":
268 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
268 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
269 uifout.set(ui.ferr)
269 uifout.set(ui.ferr)
270
270
271 # get a formatter
271 # get a formatter
272 uiformatter = getattr(ui, 'formatter', None)
272 uiformatter = getattr(ui, 'formatter', None)
273 if uiformatter:
273 if uiformatter:
274 fm = uiformatter(b'perf', opts)
274 fm = uiformatter(b'perf', opts)
275 else:
275 else:
276 # for "historical portability":
276 # for "historical portability":
277 # define formatter locally, because ui.formatter has been
277 # define formatter locally, because ui.formatter has been
278 # available since 2.2 (or ae5f92e154d3)
278 # available since 2.2 (or ae5f92e154d3)
279 from mercurial import node
279 from mercurial import node
280 class defaultformatter(object):
280 class defaultformatter(object):
281 """Minimized composition of baseformatter and plainformatter
281 """Minimized composition of baseformatter and plainformatter
282 """
282 """
283 def __init__(self, ui, topic, opts):
283 def __init__(self, ui, topic, opts):
284 self._ui = ui
284 self._ui = ui
285 if ui.debugflag:
285 if ui.debugflag:
286 self.hexfunc = node.hex
286 self.hexfunc = node.hex
287 else:
287 else:
288 self.hexfunc = node.short
288 self.hexfunc = node.short
289 def __nonzero__(self):
289 def __nonzero__(self):
290 return False
290 return False
291 __bool__ = __nonzero__
291 __bool__ = __nonzero__
292 def startitem(self):
292 def startitem(self):
293 pass
293 pass
294 def data(self, **data):
294 def data(self, **data):
295 pass
295 pass
296 def write(self, fields, deftext, *fielddata, **opts):
296 def write(self, fields, deftext, *fielddata, **opts):
297 self._ui.write(deftext % fielddata, **opts)
297 self._ui.write(deftext % fielddata, **opts)
298 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
298 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
299 if cond:
299 if cond:
300 self._ui.write(deftext % fielddata, **opts)
300 self._ui.write(deftext % fielddata, **opts)
301 def plain(self, text, **opts):
301 def plain(self, text, **opts):
302 self._ui.write(text, **opts)
302 self._ui.write(text, **opts)
303 def end(self):
303 def end(self):
304 pass
304 pass
305 fm = defaultformatter(ui, b'perf', opts)
305 fm = defaultformatter(ui, b'perf', opts)
306
306
307 # stub function, runs code only once instead of in a loop
307 # stub function, runs code only once instead of in a loop
308 # experimental config: perf.stub
308 # experimental config: perf.stub
309 if ui.configbool(b"perf", b"stub", False):
309 if ui.configbool(b"perf", b"stub", False):
310 return functools.partial(stub_timer, fm), fm
310 return functools.partial(stub_timer, fm), fm
311
311
312 # experimental config: perf.all-timing
312 # experimental config: perf.all-timing
313 displayall = ui.configbool(b"perf", b"all-timing", False)
313 displayall = ui.configbool(b"perf", b"all-timing", False)
314
314
315 # experimental config: perf.run-limits
315 # experimental config: perf.run-limits
316 limitspec = ui.configlist(b"perf", b"run-limits", [])
316 limitspec = ui.configlist(b"perf", b"run-limits", [])
317 limits = []
317 limits = []
318 for item in limitspec:
318 for item in limitspec:
319 - parts = item.split('-', 1)
319 + parts = item.split(b'-', 1)
320 if len(parts) < 2:
320 if len(parts) < 2:
321 - ui.warn(('malformatted run limit entry, missing "-": %s\n'
321 + ui.warn((b'malformatted run limit entry, missing "-": %s\n'
322 % item))
322 % item))
323 continue
323 continue
324 try:
324 try:
325 - time_limit = float(parts[0])
325 + time_limit = float(pycompat.sysstr(parts[0]))
326 except ValueError as e:
326 except ValueError as e:
327 - ui.warn(('malformatted run limit entry, %s: %s\n'
328 - % (e, item)))
327 + ui.warn((b'malformatted run limit entry, %s: %s\n'
328 + % (pycompat.bytestr(e), item)))
329 continue
329 continue
330 try:
330 try:
331 - run_limit = int(parts[1])
331 + run_limit = int(pycompat.sysstr(parts[1]))
332 except ValueError as e:
332 except ValueError as e:
333 - ui.warn(('malformatted run limit entry, %s: %s\n'
334 - % (e, item)))
333 + ui.warn((b'malformatted run limit entry, %s: %s\n'
334 + % (pycompat.bytestr(e), item)))
335 continue
335 continue
336 limits.append((time_limit, run_limit))
336 limits.append((time_limit, run_limit))
337 if not limits:
337 if not limits:
338 limits = DEFAULTLIMITS
338 limits = DEFAULTLIMITS
339
339
340 t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
340 t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
341 return t, fm
341 return t, fm
342
342
343 def stub_timer(fm, func, setup=None, title=None):
343 def stub_timer(fm, func, setup=None, title=None):
344 if setup is not None:
344 if setup is not None:
345 setup()
345 setup()
346 func()
346 func()
347
347
348 @contextlib.contextmanager
348 @contextlib.contextmanager
349 def timeone():
349 def timeone():
350 r = []
350 r = []
351 ostart = os.times()
351 ostart = os.times()
352 cstart = util.timer()
352 cstart = util.timer()
353 yield r
353 yield r
354 cstop = util.timer()
354 cstop = util.timer()
355 ostop = os.times()
355 ostop = os.times()
356 a, b = ostart, ostop
356 a, b = ostart, ostop
357 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
357 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
358
358
359
359
360 # list of stop condition (elapsed time, minimal run count)
360 # list of stop condition (elapsed time, minimal run count)
361 DEFAULTLIMITS = (
361 DEFAULTLIMITS = (
362 (3.0, 100),
362 (3.0, 100),
363 (10.0, 3),
363 (10.0, 3),
364 )
364 )
365
365
366 def _timer(fm, func, setup=None, title=None, displayall=False,
366 def _timer(fm, func, setup=None, title=None, displayall=False,
367 limits=DEFAULTLIMITS):
367 limits=DEFAULTLIMITS):
368 gc.collect()
368 gc.collect()
369 results = []
369 results = []
370 begin = util.timer()
370 begin = util.timer()
371 count = 0
371 count = 0
372 keepgoing = True
372 keepgoing = True
373 while keepgoing:
373 while keepgoing:
374 if setup is not None:
374 if setup is not None:
375 setup()
375 setup()
376 with timeone() as item:
376 with timeone() as item:
377 r = func()
377 r = func()
378 count += 1
378 count += 1
379 results.append(item[0])
379 results.append(item[0])
380 cstop = util.timer()
380 cstop = util.timer()
381 # Look for a stop condition.
381 # Look for a stop condition.
382 elapsed = cstop - begin
382 elapsed = cstop - begin
383 for t, mincount in limits:
383 for t, mincount in limits:
384 if elapsed >= t and count >= mincount:
384 if elapsed >= t and count >= mincount:
385 keepgoing = False
385 keepgoing = False
386 break
386 break
387
387
388 formatone(fm, results, title=title, result=r,
388 formatone(fm, results, title=title, result=r,
389 displayall=displayall)
389 displayall=displayall)
390
390
391 def formatone(fm, timings, title=None, result=None, displayall=False):
391 def formatone(fm, timings, title=None, result=None, displayall=False):
392
392
393 count = len(timings)
393 count = len(timings)
394
394
395 fm.startitem()
395 fm.startitem()
396
396
397 if title:
397 if title:
398 fm.write(b'title', b'! %s\n', title)
398 fm.write(b'title', b'! %s\n', title)
399 if result:
399 if result:
400 fm.write(b'result', b'! result: %s\n', result)
400 fm.write(b'result', b'! result: %s\n', result)
401 def display(role, entry):
401 def display(role, entry):
402 prefix = b''
402 prefix = b''
403 if role != b'best':
403 if role != b'best':
404 prefix = b'%s.' % role
404 prefix = b'%s.' % role
405 fm.plain(b'!')
405 fm.plain(b'!')
406 fm.write(prefix + b'wall', b' wall %f', entry[0])
406 fm.write(prefix + b'wall', b' wall %f', entry[0])
407 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
407 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
408 fm.write(prefix + b'user', b' user %f', entry[1])
408 fm.write(prefix + b'user', b' user %f', entry[1])
409 fm.write(prefix + b'sys', b' sys %f', entry[2])
409 fm.write(prefix + b'sys', b' sys %f', entry[2])
410 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
410 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
411 fm.plain(b'\n')
411 fm.plain(b'\n')
412 timings.sort()
412 timings.sort()
413 min_val = timings[0]
413 min_val = timings[0]
414 display(b'best', min_val)
414 display(b'best', min_val)
415 if displayall:
415 if displayall:
416 max_val = timings[-1]
416 max_val = timings[-1]
417 display(b'max', max_val)
417 display(b'max', max_val)
418 avg = tuple([sum(x) / count for x in zip(*timings)])
418 avg = tuple([sum(x) / count for x in zip(*timings)])
419 display(b'avg', avg)
419 display(b'avg', avg)
420 median = timings[len(timings) // 2]
420 median = timings[len(timings) // 2]
421 display(b'median', median)
421 display(b'median', median)
422
422
423 # utilities for historical portability
423 # utilities for historical portability
424
424
425 def getint(ui, section, name, default):
425 def getint(ui, section, name, default):
426 # for "historical portability":
426 # for "historical portability":
427 # ui.configint has been available since 1.9 (or fa2b596db182)
427 # ui.configint has been available since 1.9 (or fa2b596db182)
428 v = ui.config(section, name, None)
428 v = ui.config(section, name, None)
429 if v is None:
429 if v is None:
430 return default
430 return default
431 try:
431 try:
432 return int(v)
432 return int(v)
433 except ValueError:
433 except ValueError:
434 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
434 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
435 % (section, name, v))
435 % (section, name, v))
436
436
437 def safeattrsetter(obj, name, ignoremissing=False):
437 def safeattrsetter(obj, name, ignoremissing=False):
438 """Ensure that 'obj' has 'name' attribute before subsequent setattr
438 """Ensure that 'obj' has 'name' attribute before subsequent setattr
439
439
440 This function is aborted, if 'obj' doesn't have 'name' attribute
440 This function is aborted, if 'obj' doesn't have 'name' attribute
441 at runtime. This avoids overlooking removal of an attribute, which
441 at runtime. This avoids overlooking removal of an attribute, which
442 breaks assumption of performance measurement, in the future.
442 breaks assumption of performance measurement, in the future.
443
443
444 This function returns the object to (1) assign a new value, and
444 This function returns the object to (1) assign a new value, and
445 (2) restore an original value to the attribute.
445 (2) restore an original value to the attribute.
446
446
447 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
447 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
448 abortion, and this function returns None. This is useful to
448 abortion, and this function returns None. This is useful to
449 examine an attribute, which isn't ensured in all Mercurial
449 examine an attribute, which isn't ensured in all Mercurial
450 versions.
450 versions.
451 """
451 """
452 if not util.safehasattr(obj, name):
452 if not util.safehasattr(obj, name):
453 if ignoremissing:
453 if ignoremissing:
454 return None
454 return None
455 raise error.Abort((b"missing attribute %s of %s might break assumption"
455 raise error.Abort((b"missing attribute %s of %s might break assumption"
456 b" of performance measurement") % (name, obj))
456 b" of performance measurement") % (name, obj))
457
457
458 origvalue = getattr(obj, _sysstr(name))
458 origvalue = getattr(obj, _sysstr(name))
459 class attrutil(object):
459 class attrutil(object):
460 def set(self, newvalue):
460 def set(self, newvalue):
461 setattr(obj, _sysstr(name), newvalue)
461 setattr(obj, _sysstr(name), newvalue)
462 def restore(self):
462 def restore(self):
463 setattr(obj, _sysstr(name), origvalue)
463 setattr(obj, _sysstr(name), origvalue)
464
464
465 return attrutil()
465 return attrutil()
466
466
467 # utilities to examine each internal API changes
467 # utilities to examine each internal API changes
468
468
469 def getbranchmapsubsettable():
469 def getbranchmapsubsettable():
470 # for "historical portability":
470 # for "historical portability":
471 # subsettable is defined in:
471 # subsettable is defined in:
472 # - branchmap since 2.9 (or 175c6fd8cacc)
472 # - branchmap since 2.9 (or 175c6fd8cacc)
473 # - repoview since 2.5 (or 59a9f18d4587)
473 # - repoview since 2.5 (or 59a9f18d4587)
474 for mod in (branchmap, repoview):
474 for mod in (branchmap, repoview):
475 subsettable = getattr(mod, 'subsettable', None)
475 subsettable = getattr(mod, 'subsettable', None)
476 if subsettable:
476 if subsettable:
477 return subsettable
477 return subsettable
478
478
479 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
479 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
480 # branchmap and repoview modules exist, but subsettable attribute
480 # branchmap and repoview modules exist, but subsettable attribute
481 # doesn't)
481 # doesn't)
482 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
482 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
483 hint=b"use 2.5 or later")
483 hint=b"use 2.5 or later")
484
484
485 def getsvfs(repo):
485 def getsvfs(repo):
486 """Return appropriate object to access files under .hg/store
486 """Return appropriate object to access files under .hg/store
487 """
487 """
488 # for "historical portability":
488 # for "historical portability":
489 # repo.svfs has been available since 2.3 (or 7034365089bf)
489 # repo.svfs has been available since 2.3 (or 7034365089bf)
490 svfs = getattr(repo, 'svfs', None)
490 svfs = getattr(repo, 'svfs', None)
491 if svfs:
491 if svfs:
492 return svfs
492 return svfs
493 else:
493 else:
494 return getattr(repo, 'sopener')
494 return getattr(repo, 'sopener')
495
495
496 def getvfs(repo):
496 def getvfs(repo):
497 """Return appropriate object to access files under .hg
497 """Return appropriate object to access files under .hg
498 """
498 """
499 # for "historical portability":
499 # for "historical portability":
500 # repo.vfs has been available since 2.3 (or 7034365089bf)
500 # repo.vfs has been available since 2.3 (or 7034365089bf)
501 vfs = getattr(repo, 'vfs', None)
501 vfs = getattr(repo, 'vfs', None)
502 if vfs:
502 if vfs:
503 return vfs
503 return vfs
504 else:
504 else:
505 return getattr(repo, 'opener')
505 return getattr(repo, 'opener')
506
506
507 def repocleartagscachefunc(repo):
507 def repocleartagscachefunc(repo):
508 """Return the function to clear tags cache according to repo internal API
508 """Return the function to clear tags cache according to repo internal API
509 """
509 """
510 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
510 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
511 # in this case, setattr(repo, '_tagscache', None) or so isn't
511 # in this case, setattr(repo, '_tagscache', None) or so isn't
512 # correct way to clear tags cache, because existing code paths
512 # correct way to clear tags cache, because existing code paths
513 # expect _tagscache to be a structured object.
513 # expect _tagscache to be a structured object.
514 def clearcache():
514 def clearcache():
515 # _tagscache has been filteredpropertycache since 2.5 (or
515 # _tagscache has been filteredpropertycache since 2.5 (or
516 # 98c867ac1330), and delattr() can't work in such case
516 # 98c867ac1330), and delattr() can't work in such case
517 if b'_tagscache' in vars(repo):
517 if b'_tagscache' in vars(repo):
518 del repo.__dict__[b'_tagscache']
518 del repo.__dict__[b'_tagscache']
519 return clearcache
519 return clearcache
520
520
521 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
521 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
522 if repotags: # since 1.4 (or 5614a628d173)
522 if repotags: # since 1.4 (or 5614a628d173)
523 return lambda : repotags.set(None)
523 return lambda : repotags.set(None)
524
524
525 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
525 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
526 if repotagscache: # since 0.6 (or d7df759d0e97)
526 if repotagscache: # since 0.6 (or d7df759d0e97)
527 return lambda : repotagscache.set(None)
527 return lambda : repotagscache.set(None)
528
528
529 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
529 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
530 # this point, but it isn't so problematic, because:
530 # this point, but it isn't so problematic, because:
531 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
531 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
532 # in perftags() causes failure soon
532 # in perftags() causes failure soon
533 # - perf.py itself has been available since 1.1 (or eb240755386d)
533 # - perf.py itself has been available since 1.1 (or eb240755386d)
534 raise error.Abort((b"tags API of this hg command is unknown"))
534 raise error.Abort((b"tags API of this hg command is unknown"))
535
535
536 # utilities to clear cache
536 # utilities to clear cache
537
537
538 def clearfilecache(obj, attrname):
538 def clearfilecache(obj, attrname):
539 unfiltered = getattr(obj, 'unfiltered', None)
539 unfiltered = getattr(obj, 'unfiltered', None)
540 if unfiltered is not None:
540 if unfiltered is not None:
541 obj = obj.unfiltered()
541 obj = obj.unfiltered()
542 if attrname in vars(obj):
542 if attrname in vars(obj):
543 delattr(obj, attrname)
543 delattr(obj, attrname)
544 obj._filecache.pop(attrname, None)
544 obj._filecache.pop(attrname, None)
545
545
546 def clearchangelog(repo):
546 def clearchangelog(repo):
547 if repo is not repo.unfiltered():
547 if repo is not repo.unfiltered():
548 object.__setattr__(repo, r'_clcachekey', None)
548 object.__setattr__(repo, r'_clcachekey', None)
549 object.__setattr__(repo, r'_clcache', None)
549 object.__setattr__(repo, r'_clcache', None)
550 clearfilecache(repo.unfiltered(), 'changelog')
550 clearfilecache(repo.unfiltered(), 'changelog')
551
551
552 # perf commands
552 # perf commands
553
553
554 @command(b'perfwalk', formatteropts)
554 @command(b'perfwalk', formatteropts)
555 def perfwalk(ui, repo, *pats, **opts):
555 def perfwalk(ui, repo, *pats, **opts):
556 opts = _byteskwargs(opts)
556 opts = _byteskwargs(opts)
557 timer, fm = gettimer(ui, opts)
557 timer, fm = gettimer(ui, opts)
558 m = scmutil.match(repo[None], pats, {})
558 m = scmutil.match(repo[None], pats, {})
559 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
559 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
560 ignored=False))))
560 ignored=False))))
561 fm.end()
561 fm.end()
562
562
563 @command(b'perfannotate', formatteropts)
563 @command(b'perfannotate', formatteropts)
564 def perfannotate(ui, repo, f, **opts):
564 def perfannotate(ui, repo, f, **opts):
565 opts = _byteskwargs(opts)
565 opts = _byteskwargs(opts)
566 timer, fm = gettimer(ui, opts)
566 timer, fm = gettimer(ui, opts)
567 fc = repo[b'.'][f]
567 fc = repo[b'.'][f]
568 timer(lambda: len(fc.annotate(True)))
568 timer(lambda: len(fc.annotate(True)))
569 fm.end()
569 fm.end()
570
570
571 @command(b'perfstatus',
571 @command(b'perfstatus',
572 [(b'u', b'unknown', False,
572 [(b'u', b'unknown', False,
573 b'ask status to look for unknown files')] + formatteropts)
573 b'ask status to look for unknown files')] + formatteropts)
574 def perfstatus(ui, repo, **opts):
574 def perfstatus(ui, repo, **opts):
575 opts = _byteskwargs(opts)
575 opts = _byteskwargs(opts)
576 #m = match.always(repo.root, repo.getcwd())
576 #m = match.always(repo.root, repo.getcwd())
577 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
577 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
578 # False))))
578 # False))))
579 timer, fm = gettimer(ui, opts)
579 timer, fm = gettimer(ui, opts)
580 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
580 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
581 fm.end()
581 fm.end()
582
582
583 @command(b'perfaddremove', formatteropts)
583 @command(b'perfaddremove', formatteropts)
584 def perfaddremove(ui, repo, **opts):
584 def perfaddremove(ui, repo, **opts):
585 opts = _byteskwargs(opts)
585 opts = _byteskwargs(opts)
586 timer, fm = gettimer(ui, opts)
586 timer, fm = gettimer(ui, opts)
587 try:
587 try:
588 oldquiet = repo.ui.quiet
588 oldquiet = repo.ui.quiet
589 repo.ui.quiet = True
589 repo.ui.quiet = True
590 matcher = scmutil.match(repo[None])
590 matcher = scmutil.match(repo[None])
591 opts[b'dry_run'] = True
591 opts[b'dry_run'] = True
592 if b'uipathfn' in getargspec(scmutil.addremove).args:
592 if b'uipathfn' in getargspec(scmutil.addremove).args:
593 uipathfn = scmutil.getuipathfn(repo)
593 uipathfn = scmutil.getuipathfn(repo)
594 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
594 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
595 else:
595 else:
596 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
596 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
597 finally:
597 finally:
598 repo.ui.quiet = oldquiet
598 repo.ui.quiet = oldquiet
599 fm.end()
599 fm.end()
600
600
601 def clearcaches(cl):
601 def clearcaches(cl):
602 # behave somewhat consistently across internal API changes
602 # behave somewhat consistently across internal API changes
603 if util.safehasattr(cl, b'clearcaches'):
603 if util.safehasattr(cl, b'clearcaches'):
604 cl.clearcaches()
604 cl.clearcaches()
605 elif util.safehasattr(cl, b'_nodecache'):
605 elif util.safehasattr(cl, b'_nodecache'):
606 from mercurial.node import nullid, nullrev
606 from mercurial.node import nullid, nullrev
607 cl._nodecache = {nullid: nullrev}
607 cl._nodecache = {nullid: nullrev}
608 cl._nodepos = None
608 cl._nodepos = None
609
609
610 @command(b'perfheads', formatteropts)
610 @command(b'perfheads', formatteropts)
611 def perfheads(ui, repo, **opts):
611 def perfheads(ui, repo, **opts):
612 """benchmark the computation of a changelog heads"""
612 """benchmark the computation of a changelog heads"""
613 opts = _byteskwargs(opts)
613 opts = _byteskwargs(opts)
614 timer, fm = gettimer(ui, opts)
614 timer, fm = gettimer(ui, opts)
615 cl = repo.changelog
615 cl = repo.changelog
616 def s():
616 def s():
617 clearcaches(cl)
617 clearcaches(cl)
618 def d():
618 def d():
619 len(cl.headrevs())
619 len(cl.headrevs())
620 timer(d, setup=s)
620 timer(d, setup=s)
621 fm.end()
621 fm.end()
622
622
623 @command(b'perftags', formatteropts+
623 @command(b'perftags', formatteropts+
624 [
624 [
625 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
625 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
626 ])
626 ])
627 def perftags(ui, repo, **opts):
627 def perftags(ui, repo, **opts):
628 opts = _byteskwargs(opts)
628 opts = _byteskwargs(opts)
629 timer, fm = gettimer(ui, opts)
629 timer, fm = gettimer(ui, opts)
630 repocleartagscache = repocleartagscachefunc(repo)
630 repocleartagscache = repocleartagscachefunc(repo)
631 clearrevlogs = opts[b'clear_revlogs']
631 clearrevlogs = opts[b'clear_revlogs']
632 def s():
632 def s():
633 if clearrevlogs:
633 if clearrevlogs:
634 clearchangelog(repo)
634 clearchangelog(repo)
635 clearfilecache(repo.unfiltered(), 'manifest')
635 clearfilecache(repo.unfiltered(), 'manifest')
636 repocleartagscache()
636 repocleartagscache()
637 def t():
637 def t():
638 return len(repo.tags())
638 return len(repo.tags())
639 timer(t, setup=s)
639 timer(t, setup=s)
640 fm.end()
640 fm.end()
641
641
642 @command(b'perfancestors', formatteropts)
642 @command(b'perfancestors', formatteropts)
643 def perfancestors(ui, repo, **opts):
643 def perfancestors(ui, repo, **opts):
644 opts = _byteskwargs(opts)
644 opts = _byteskwargs(opts)
645 timer, fm = gettimer(ui, opts)
645 timer, fm = gettimer(ui, opts)
646 heads = repo.changelog.headrevs()
646 heads = repo.changelog.headrevs()
647 def d():
647 def d():
648 for a in repo.changelog.ancestors(heads):
648 for a in repo.changelog.ancestors(heads):
649 pass
649 pass
650 timer(d)
650 timer(d)
651 fm.end()
651 fm.end()
652
652
653 @command(b'perfancestorset', formatteropts)
653 @command(b'perfancestorset', formatteropts)
654 def perfancestorset(ui, repo, revset, **opts):
654 def perfancestorset(ui, repo, revset, **opts):
655 opts = _byteskwargs(opts)
655 opts = _byteskwargs(opts)
656 timer, fm = gettimer(ui, opts)
656 timer, fm = gettimer(ui, opts)
657 revs = repo.revs(revset)
657 revs = repo.revs(revset)
658 heads = repo.changelog.headrevs()
658 heads = repo.changelog.headrevs()
659 def d():
659 def d():
660 s = repo.changelog.ancestors(heads)
660 s = repo.changelog.ancestors(heads)
661 for rev in revs:
661 for rev in revs:
662 rev in s
662 rev in s
663 timer(d)
663 timer(d)
664 fm.end()
664 fm.end()
665
665
666 @command(b'perfdiscovery', formatteropts, b'PATH')
666 @command(b'perfdiscovery', formatteropts, b'PATH')
667 def perfdiscovery(ui, repo, path, **opts):
667 def perfdiscovery(ui, repo, path, **opts):
668 """benchmark discovery between local repo and the peer at given path
668 """benchmark discovery between local repo and the peer at given path
669 """
669 """
670 repos = [repo, None]
670 repos = [repo, None]
671 timer, fm = gettimer(ui, opts)
671 timer, fm = gettimer(ui, opts)
672 path = ui.expandpath(path)
672 path = ui.expandpath(path)
673
673
674 def s():
674 def s():
675 repos[1] = hg.peer(ui, opts, path)
675 repos[1] = hg.peer(ui, opts, path)
676 def d():
676 def d():
677 setdiscovery.findcommonheads(ui, *repos)
677 setdiscovery.findcommonheads(ui, *repos)
678 timer(d, setup=s)
678 timer(d, setup=s)
679 fm.end()
679 fm.end()
680
680
681 @command(b'perfbookmarks', formatteropts +
681 @command(b'perfbookmarks', formatteropts +
682 [
682 [
683 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
683 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
684 ])
684 ])
685 def perfbookmarks(ui, repo, **opts):
685 def perfbookmarks(ui, repo, **opts):
686 """benchmark parsing bookmarks from disk to memory"""
686 """benchmark parsing bookmarks from disk to memory"""
687 opts = _byteskwargs(opts)
687 opts = _byteskwargs(opts)
688 timer, fm = gettimer(ui, opts)
688 timer, fm = gettimer(ui, opts)
689
689
690 clearrevlogs = opts[b'clear_revlogs']
690 clearrevlogs = opts[b'clear_revlogs']
691 def s():
691 def s():
692 if clearrevlogs:
692 if clearrevlogs:
693 clearchangelog(repo)
693 clearchangelog(repo)
694 clearfilecache(repo, b'_bookmarks')
694 clearfilecache(repo, b'_bookmarks')
695 def d():
695 def d():
696 repo._bookmarks
696 repo._bookmarks
697 timer(d, setup=s)
697 timer(d, setup=s)
698 fm.end()
698 fm.end()
699
699
700 @command(b'perfbundleread', formatteropts, b'BUNDLE')
700 @command(b'perfbundleread', formatteropts, b'BUNDLE')
701 def perfbundleread(ui, repo, bundlepath, **opts):
701 def perfbundleread(ui, repo, bundlepath, **opts):
702 """Benchmark reading of bundle files.
702 """Benchmark reading of bundle files.
703
703
704 This command is meant to isolate the I/O part of bundle reading as
704 This command is meant to isolate the I/O part of bundle reading as
705 much as possible.
705 much as possible.
706 """
706 """
707 from mercurial import (
707 from mercurial import (
708 bundle2,
708 bundle2,
709 exchange,
709 exchange,
710 streamclone,
710 streamclone,
711 )
711 )
712
712
713 opts = _byteskwargs(opts)
713 opts = _byteskwargs(opts)
714
714
715 def makebench(fn):
715 def makebench(fn):
716 def run():
716 def run():
717 with open(bundlepath, b'rb') as fh:
717 with open(bundlepath, b'rb') as fh:
718 bundle = exchange.readbundle(ui, fh, bundlepath)
718 bundle = exchange.readbundle(ui, fh, bundlepath)
719 fn(bundle)
719 fn(bundle)
720
720
721 return run
721 return run
722
722
723 def makereadnbytes(size):
723 def makereadnbytes(size):
724 def run():
724 def run():
725 with open(bundlepath, b'rb') as fh:
725 with open(bundlepath, b'rb') as fh:
726 bundle = exchange.readbundle(ui, fh, bundlepath)
726 bundle = exchange.readbundle(ui, fh, bundlepath)
727 while bundle.read(size):
727 while bundle.read(size):
728 pass
728 pass
729
729
730 return run
730 return run
731
731
732 def makestdioread(size):
732 def makestdioread(size):
733 def run():
733 def run():
734 with open(bundlepath, b'rb') as fh:
734 with open(bundlepath, b'rb') as fh:
735 while fh.read(size):
735 while fh.read(size):
736 pass
736 pass
737
737
738 return run
738 return run
739
739
740 # bundle1
740 # bundle1
741
741
742 def deltaiter(bundle):
742 def deltaiter(bundle):
743 for delta in bundle.deltaiter():
743 for delta in bundle.deltaiter():
744 pass
744 pass
745
745
746 def iterchunks(bundle):
746 def iterchunks(bundle):
747 for chunk in bundle.getchunks():
747 for chunk in bundle.getchunks():
748 pass
748 pass
749
749
750 # bundle2
750 # bundle2
751
751
752 def forwardchunks(bundle):
752 def forwardchunks(bundle):
753 for chunk in bundle._forwardchunks():
753 for chunk in bundle._forwardchunks():
754 pass
754 pass
755
755
756 def iterparts(bundle):
756 def iterparts(bundle):
757 for part in bundle.iterparts():
757 for part in bundle.iterparts():
758 pass
758 pass
759
759
760 def iterpartsseekable(bundle):
760 def iterpartsseekable(bundle):
761 for part in bundle.iterparts(seekable=True):
761 for part in bundle.iterparts(seekable=True):
762 pass
762 pass
763
763
764 def seek(bundle):
764 def seek(bundle):
765 for part in bundle.iterparts(seekable=True):
765 for part in bundle.iterparts(seekable=True):
766 part.seek(0, os.SEEK_END)
766 part.seek(0, os.SEEK_END)
767
767
768 def makepartreadnbytes(size):
768 def makepartreadnbytes(size):
769 def run():
769 def run():
770 with open(bundlepath, b'rb') as fh:
770 with open(bundlepath, b'rb') as fh:
771 bundle = exchange.readbundle(ui, fh, bundlepath)
771 bundle = exchange.readbundle(ui, fh, bundlepath)
772 for part in bundle.iterparts():
772 for part in bundle.iterparts():
773 while part.read(size):
773 while part.read(size):
774 pass
774 pass
775
775
776 return run
776 return run
777
777
778 benches = [
778 benches = [
779 (makestdioread(8192), b'read(8k)'),
779 (makestdioread(8192), b'read(8k)'),
780 (makestdioread(16384), b'read(16k)'),
780 (makestdioread(16384), b'read(16k)'),
781 (makestdioread(32768), b'read(32k)'),
781 (makestdioread(32768), b'read(32k)'),
782 (makestdioread(131072), b'read(128k)'),
782 (makestdioread(131072), b'read(128k)'),
783 ]
783 ]
784
784
785 with open(bundlepath, b'rb') as fh:
785 with open(bundlepath, b'rb') as fh:
786 bundle = exchange.readbundle(ui, fh, bundlepath)
786 bundle = exchange.readbundle(ui, fh, bundlepath)
787
787
788 if isinstance(bundle, changegroup.cg1unpacker):
788 if isinstance(bundle, changegroup.cg1unpacker):
789 benches.extend([
789 benches.extend([
790 (makebench(deltaiter), b'cg1 deltaiter()'),
790 (makebench(deltaiter), b'cg1 deltaiter()'),
791 (makebench(iterchunks), b'cg1 getchunks()'),
791 (makebench(iterchunks), b'cg1 getchunks()'),
792 (makereadnbytes(8192), b'cg1 read(8k)'),
792 (makereadnbytes(8192), b'cg1 read(8k)'),
793 (makereadnbytes(16384), b'cg1 read(16k)'),
793 (makereadnbytes(16384), b'cg1 read(16k)'),
794 (makereadnbytes(32768), b'cg1 read(32k)'),
794 (makereadnbytes(32768), b'cg1 read(32k)'),
795 (makereadnbytes(131072), b'cg1 read(128k)'),
795 (makereadnbytes(131072), b'cg1 read(128k)'),
796 ])
796 ])
797 elif isinstance(bundle, bundle2.unbundle20):
797 elif isinstance(bundle, bundle2.unbundle20):
798 benches.extend([
798 benches.extend([
799 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
799 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
800 (makebench(iterparts), b'bundle2 iterparts()'),
800 (makebench(iterparts), b'bundle2 iterparts()'),
801 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
801 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
802 (makebench(seek), b'bundle2 part seek()'),
802 (makebench(seek), b'bundle2 part seek()'),
803 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
803 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
804 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
804 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
805 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
805 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
806 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
806 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
807 ])
807 ])
808 elif isinstance(bundle, streamclone.streamcloneapplier):
808 elif isinstance(bundle, streamclone.streamcloneapplier):
809 raise error.Abort(b'stream clone bundles not supported')
809 raise error.Abort(b'stream clone bundles not supported')
810 else:
810 else:
811 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
811 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
812
812
813 for fn, title in benches:
813 for fn, title in benches:
814 timer, fm = gettimer(ui, opts)
814 timer, fm = gettimer(ui, opts)
815 timer(fn, title=title)
815 timer(fn, title=title)
816 fm.end()
816 fm.end()
817
817
818 @command(b'perfchangegroupchangelog', formatteropts +
818 @command(b'perfchangegroupchangelog', formatteropts +
819 [(b'', b'cgversion', b'02', b'changegroup version'),
819 [(b'', b'cgversion', b'02', b'changegroup version'),
820 (b'r', b'rev', b'', b'revisions to add to changegroup')])
820 (b'r', b'rev', b'', b'revisions to add to changegroup')])
821 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
821 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
822 """Benchmark producing a changelog group for a changegroup.
822 """Benchmark producing a changelog group for a changegroup.
823
823
824 This measures the time spent processing the changelog during a
824 This measures the time spent processing the changelog during a
825 bundle operation. This occurs during `hg bundle` and on a server
825 bundle operation. This occurs during `hg bundle` and on a server
826 processing a `getbundle` wire protocol request (handles clones
826 processing a `getbundle` wire protocol request (handles clones
827 and pull requests).
827 and pull requests).
828
828
829 By default, all revisions are added to the changegroup.
829 By default, all revisions are added to the changegroup.
830 """
830 """
831 opts = _byteskwargs(opts)
831 opts = _byteskwargs(opts)
832 cl = repo.changelog
832 cl = repo.changelog
833 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
833 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
834 bundler = changegroup.getbundler(cgversion, repo)
834 bundler = changegroup.getbundler(cgversion, repo)
835
835
836 def d():
836 def d():
837 state, chunks = bundler._generatechangelog(cl, nodes)
837 state, chunks = bundler._generatechangelog(cl, nodes)
838 for chunk in chunks:
838 for chunk in chunks:
839 pass
839 pass
840
840
841 timer, fm = gettimer(ui, opts)
841 timer, fm = gettimer(ui, opts)
842
842
843 # Terminal printing can interfere with timing. So disable it.
843 # Terminal printing can interfere with timing. So disable it.
844 with ui.configoverride({(b'progress', b'disable'): True}):
844 with ui.configoverride({(b'progress', b'disable'): True}):
845 timer(d)
845 timer(d)
846
846
847 fm.end()
847 fm.end()
848
848
849 @command(b'perfdirs', formatteropts)
849 @command(b'perfdirs', formatteropts)
850 def perfdirs(ui, repo, **opts):
850 def perfdirs(ui, repo, **opts):
851 opts = _byteskwargs(opts)
851 opts = _byteskwargs(opts)
852 timer, fm = gettimer(ui, opts)
852 timer, fm = gettimer(ui, opts)
853 dirstate = repo.dirstate
853 dirstate = repo.dirstate
854 b'a' in dirstate
854 b'a' in dirstate
855 def d():
855 def d():
856 dirstate.hasdir(b'a')
856 dirstate.hasdir(b'a')
857 del dirstate._map._dirs
857 del dirstate._map._dirs
858 timer(d)
858 timer(d)
859 fm.end()
859 fm.end()
860
860
861 @command(b'perfdirstate', formatteropts)
861 @command(b'perfdirstate', formatteropts)
862 def perfdirstate(ui, repo, **opts):
862 def perfdirstate(ui, repo, **opts):
863 opts = _byteskwargs(opts)
863 opts = _byteskwargs(opts)
864 timer, fm = gettimer(ui, opts)
864 timer, fm = gettimer(ui, opts)
865 b"a" in repo.dirstate
865 b"a" in repo.dirstate
866 def d():
866 def d():
867 repo.dirstate.invalidate()
867 repo.dirstate.invalidate()
868 b"a" in repo.dirstate
868 b"a" in repo.dirstate
869 timer(d)
869 timer(d)
870 fm.end()
870 fm.end()
871
871
872 @command(b'perfdirstatedirs', formatteropts)
872 @command(b'perfdirstatedirs', formatteropts)
873 def perfdirstatedirs(ui, repo, **opts):
873 def perfdirstatedirs(ui, repo, **opts):
874 opts = _byteskwargs(opts)
874 opts = _byteskwargs(opts)
875 timer, fm = gettimer(ui, opts)
875 timer, fm = gettimer(ui, opts)
876 b"a" in repo.dirstate
876 b"a" in repo.dirstate
877 def d():
877 def d():
878 repo.dirstate.hasdir(b"a")
878 repo.dirstate.hasdir(b"a")
879 del repo.dirstate._map._dirs
879 del repo.dirstate._map._dirs
880 timer(d)
880 timer(d)
881 fm.end()
881 fm.end()
882
882
883 @command(b'perfdirstatefoldmap', formatteropts)
883 @command(b'perfdirstatefoldmap', formatteropts)
884 def perfdirstatefoldmap(ui, repo, **opts):
884 def perfdirstatefoldmap(ui, repo, **opts):
885 opts = _byteskwargs(opts)
885 opts = _byteskwargs(opts)
886 timer, fm = gettimer(ui, opts)
886 timer, fm = gettimer(ui, opts)
887 dirstate = repo.dirstate
887 dirstate = repo.dirstate
888 b'a' in dirstate
888 b'a' in dirstate
889 def d():
889 def d():
890 dirstate._map.filefoldmap.get(b'a')
890 dirstate._map.filefoldmap.get(b'a')
891 del dirstate._map.filefoldmap
891 del dirstate._map.filefoldmap
892 timer(d)
892 timer(d)
893 fm.end()
893 fm.end()
894
894
895 @command(b'perfdirfoldmap', formatteropts)
895 @command(b'perfdirfoldmap', formatteropts)
896 def perfdirfoldmap(ui, repo, **opts):
896 def perfdirfoldmap(ui, repo, **opts):
897 opts = _byteskwargs(opts)
897 opts = _byteskwargs(opts)
898 timer, fm = gettimer(ui, opts)
898 timer, fm = gettimer(ui, opts)
899 dirstate = repo.dirstate
899 dirstate = repo.dirstate
900 b'a' in dirstate
900 b'a' in dirstate
901 def d():
901 def d():
902 dirstate._map.dirfoldmap.get(b'a')
902 dirstate._map.dirfoldmap.get(b'a')
903 del dirstate._map.dirfoldmap
903 del dirstate._map.dirfoldmap
904 del dirstate._map._dirs
904 del dirstate._map._dirs
905 timer(d)
905 timer(d)
906 fm.end()
906 fm.end()
907
907
908 @command(b'perfdirstatewrite', formatteropts)
908 @command(b'perfdirstatewrite', formatteropts)
909 def perfdirstatewrite(ui, repo, **opts):
909 def perfdirstatewrite(ui, repo, **opts):
910 opts = _byteskwargs(opts)
910 opts = _byteskwargs(opts)
911 timer, fm = gettimer(ui, opts)
911 timer, fm = gettimer(ui, opts)
912 ds = repo.dirstate
912 ds = repo.dirstate
913 b"a" in ds
913 b"a" in ds
914 def d():
914 def d():
915 ds._dirty = True
915 ds._dirty = True
916 ds.write(repo.currenttransaction())
916 ds.write(repo.currenttransaction())
917 timer(d)
917 timer(d)
918 fm.end()
918 fm.end()
919
919
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

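# Example invocation (illustrative only; assumes this file is enabled as the
# "perf" extension, e.g. --config extensions.perf=path/to/perf.py):
#
#   hg perfmergecalculate -r tip
#
# times merge.calculateupdates() between the working copy and tip without
# modifying the working directory.
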
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

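# Example invocation (illustrative; assumes the extension is enabled):
#
#   hg perfphases --full
#
# --full also clears the _phasecache filecache entry, so the time spent
# re-reading the phase data from disk is included in every run.
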
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

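# Example invocations (illustrative; assumes the extension is enabled):
#
#   hg perfmanifest tip                  # resolve a changeset, read its manifest
#   hg perfmanifest -m 0 --clear-disk    # manifest revlog rev 0, clearing on-disk caches
#
# With -m/--manifest-rev the positional argument is interpreted as a manifest
# revlog revision number or full node rather than a changeset revision.
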
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()

@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not perform a revision lookup post creation'),
    ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()

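# Example invocations (illustrative; assumes the extension is enabled). --rev
# may be repeated and accepts revsets, matching the sets suggested in the
# docstring above:
#
#   hg perfindex --rev tip
#   hg perfindex --rev '-10000:' --rev 0
#   hg perfindex --no-lookup
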
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the amount and order of revisions we look
    up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()

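# Example invocation (illustrative; assumes the extension is enabled). --rev is
# mandatory here and accepts revsets:
#
#   hg perfnodemap --rev '-10000:' --rev 0
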
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

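# Example invocation (illustrative; assumes the extension is enabled). The
# number of revisions sampled can be lowered through the experimental
# perf.parentscount option mentioned in the docstring:
#
#   hg perfparents --config perf.parentscount=100
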
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

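# Example invocation (illustrative; assumes the extension is enabled). The
# command is registered with norepo=True, so it can run outside a repository:
#
#   hg perflinelogedits -n 1000 --max-hunk-lines 5
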
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

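# Example invocations (illustrative; assumes the extension is enabled). A
# template may be passed as a positional argument; otherwise the default
# template defined above is rendered:
#
#   hg perftemplating -r '-1000:'
#   hg perftemplating -r '-1000:' '{rev}:{node|short} {desc|firstline}\n'
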
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perfpathcopies` command

    This command finds source-destination pairs relevant for copy tracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()

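# Example invocation (illustrative; assumes the extension is enabled). -r/--revs
# restricts the merges inspected, and --timing additionally runs the (slow)
# rename detection for each source/destination pair:
#
#   hg perfhelper-pathcopies --timing -r 'last(all(), 1000)'
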
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

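# Example invocations (illustrative; assumes the extension is enabled):
#
#   hg perfbdiff -c 1000 --count 100                  # changelog revs vs delta parents
#   hg perfbdiff -c 1000 --count 100 --blocks --xdiff
#   hg perfbdiff --alldata 1000                       # manifest + filelog texts of changeset 1000
#
# Per the check above, --xdiff is only accepted together with --blocks.
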
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

1725 @command(b'perfrevlogindex', revlogopts + formatteropts,
1725 @command(b'perfrevlogindex', revlogopts + formatteropts,
1726 b'-c|-m|FILE')
1726 b'-c|-m|FILE')
1727 def perfrevlogindex(ui, repo, file_=None, **opts):
1727 def perfrevlogindex(ui, repo, file_=None, **opts):
1728 """Benchmark operations against a revlog index.
1728 """Benchmark operations against a revlog index.
1729
1729
1730 This tests constructing a revlog instance, reading index data,
1730 This tests constructing a revlog instance, reading index data,
1731 parsing index data, and performing various operations related to
1731 parsing index data, and performing various operations related to
1732 index data.
1732 index data.
1733 """
1733 """
1734
1734
1735 opts = _byteskwargs(opts)
1735 opts = _byteskwargs(opts)
1736
1736
1737 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1737 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1738
1738
1739 opener = getattr(rl, 'opener') # trick linter
1739 opener = getattr(rl, 'opener') # trick linter
1740 indexfile = rl.indexfile
1740 indexfile = rl.indexfile
1741 data = opener.read(indexfile)
1741 data = opener.read(indexfile)
1742
1742
1743 header = struct.unpack(b'>I', data[0:4])[0]
1743 header = struct.unpack(b'>I', data[0:4])[0]
1744 version = header & 0xFFFF
1744 version = header & 0xFFFF
1745 if version == 1:
1745 if version == 1:
1746 revlogio = revlog.revlogio()
1746 revlogio = revlog.revlogio()
1747 inline = header & (1 << 16)
1747 inline = header & (1 << 16)
1748 else:
1748 else:
1749 raise error.Abort((b'unsupported revlog version: %d') % version)
1749 raise error.Abort((b'unsupported revlog version: %d') % version)
1750
1750
1751 rllen = len(rl)
1751 rllen = len(rl)
1752
1752
1753 node0 = rl.node(0)
1753 node0 = rl.node(0)
1754 node25 = rl.node(rllen // 4)
1754 node25 = rl.node(rllen // 4)
1755 node50 = rl.node(rllen // 2)
1755 node50 = rl.node(rllen // 2)
1756 node75 = rl.node(rllen // 4 * 3)
1756 node75 = rl.node(rllen // 4 * 3)
1757 node100 = rl.node(rllen - 1)
1757 node100 = rl.node(rllen - 1)
1758
1758
1759 allrevs = range(rllen)
1759 allrevs = range(rllen)
1760 allrevsrev = list(reversed(allrevs))
1760 allrevsrev = list(reversed(allrevs))
1761 allnodes = [rl.node(rev) for rev in range(rllen)]
1761 allnodes = [rl.node(rev) for rev in range(rllen)]
1762 allnodesrev = list(reversed(allnodes))
1762 allnodesrev = list(reversed(allnodes))
1763
1763
1764 def constructor():
1764 def constructor():
1765 revlog.revlog(opener, indexfile)
1765 revlog.revlog(opener, indexfile)
1766
1766
1767 def read():
1767 def read():
1768 with opener(indexfile) as fh:
1768 with opener(indexfile) as fh:
1769 fh.read()
1769 fh.read()
1770
1770
1771 def parseindex():
1771 def parseindex():
1772 revlogio.parseindex(data, inline)
1772 revlogio.parseindex(data, inline)
1773
1773
1774 def getentry(revornode):
1774 def getentry(revornode):
1775 index = revlogio.parseindex(data, inline)[0]
1775 index = revlogio.parseindex(data, inline)[0]
1776 index[revornode]
1776 index[revornode]
1777
1777
1778 def getentries(revs, count=1):
1778 def getentries(revs, count=1):
1779 index = revlogio.parseindex(data, inline)[0]
1779 index = revlogio.parseindex(data, inline)[0]
1780
1780
1781 for i in range(count):
1781 for i in range(count):
1782 for rev in revs:
1782 for rev in revs:
1783 index[rev]
1783 index[rev]
1784
1784
1785 def resolvenode(node):
1785 def resolvenode(node):
1786 nodemap = revlogio.parseindex(data, inline)[1]
1786 nodemap = revlogio.parseindex(data, inline)[1]
1787 # This only works for the C code.
1787 # This only works for the C code.
1788 if nodemap is None:
1788 if nodemap is None:
1789 return
1789 return
1790
1790
1791 try:
1791 try:
1792 nodemap[node]
1792 nodemap[node]
1793 except error.RevlogError:
1793 except error.RevlogError:
1794 pass
1794 pass
1795
1795
1796 def resolvenodes(nodes, count=1):
1796 def resolvenodes(nodes, count=1):
1797 nodemap = revlogio.parseindex(data, inline)[1]
1797 nodemap = revlogio.parseindex(data, inline)[1]
1798 if nodemap is None:
1798 if nodemap is None:
1799 return
1799 return
1800
1800
1801 for i in range(count):
1801 for i in range(count):
1802 for node in nodes:
1802 for node in nodes:
1803 try:
1803 try:
1804 nodemap[node]
1804 nodemap[node]
1805 except error.RevlogError:
1805 except error.RevlogError:
1806 pass
1806 pass
1807
1807
1808 benches = [
1808 benches = [
1809 (constructor, b'revlog constructor'),
1809 (constructor, b'revlog constructor'),
1810 (read, b'read'),
1810 (read, b'read'),
1811 (parseindex, b'create index object'),
1811 (parseindex, b'create index object'),
1812 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1812 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1813 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1813 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1814 (lambda: resolvenode(node0), b'look up node at rev 0'),
1814 (lambda: resolvenode(node0), b'look up node at rev 0'),
1815 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1815 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1816 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1816 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1817 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1817 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1818 (lambda: resolvenode(node100), b'look up node at tip'),
1818 (lambda: resolvenode(node100), b'look up node at tip'),
1819 # 2x variation is to measure caching impact.
1819 # 2x variation is to measure caching impact.
1820 (lambda: resolvenodes(allnodes),
1820 (lambda: resolvenodes(allnodes),
1821 b'look up all nodes (forward)'),
1821 b'look up all nodes (forward)'),
1822 (lambda: resolvenodes(allnodes, 2),
1822 (lambda: resolvenodes(allnodes, 2),
1823 b'look up all nodes 2x (forward)'),
1823 b'look up all nodes 2x (forward)'),
1824 (lambda: resolvenodes(allnodesrev),
1824 (lambda: resolvenodes(allnodesrev),
1825 b'look up all nodes (reverse)'),
1825 b'look up all nodes (reverse)'),
1826 (lambda: resolvenodes(allnodesrev, 2),
1826 (lambda: resolvenodes(allnodesrev, 2),
1827 b'look up all nodes 2x (reverse)'),
1827 b'look up all nodes 2x (reverse)'),
1828 (lambda: getentries(allrevs),
1828 (lambda: getentries(allrevs),
1829 b'retrieve all index entries (forward)'),
1829 b'retrieve all index entries (forward)'),
1830 (lambda: getentries(allrevs, 2),
1830 (lambda: getentries(allrevs, 2),
1831 b'retrieve all index entries 2x (forward)'),
1831 b'retrieve all index entries 2x (forward)'),
1832 (lambda: getentries(allrevsrev),
1832 (lambda: getentries(allrevsrev),
1833 b'retrieve all index entries (reverse)'),
1833 b'retrieve all index entries (reverse)'),
1834 (lambda: getentries(allrevsrev, 2),
1834 (lambda: getentries(allrevsrev, 2),
1835 b'retrieve all index entries 2x (reverse)'),
1835 b'retrieve all index entries 2x (reverse)'),
1836 ]
1836 ]
1837
1837
1838 for fn, title in benches:
1838 for fn, title in benches:
1839 timer, fm = gettimer(ui, opts)
1839 timer, fm = gettimer(ui, opts)
1840 timer(fn, title=title)
1840 timer(fn, title=title)
1841 fm.end()
1841 fm.end()
1842
1842
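# Illustrative, self-contained sketch (plain Python, not a Mercurial API): the
# "2x" benchmark variants in perfrevlogindex above exist to expose caching
# effects -- a second pass over the same keys should be cheaper once any
# lazily built lookup structure is warm.  A plain dict stands in for the
# revlog nodemap here; the real C index behaves differently, so treat this
# purely as the measurement pattern.
def _example_lookup_timing():
    import timeit

    nodemap = {b'%040d' % rev: rev for rev in range(10000)}
    nodes = list(nodemap)

    def lookupall(count=1):
        for _ in range(count):
            for node in nodes:
                nodemap[node]

    once = timeit.timeit(lambda: lookupall(1), number=3)
    twice = timeit.timeit(lambda: lookupall(2), number=3)
    return once, twice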
1843 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1843 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1844 [(b'd', b'dist', 100, b'distance between the revisions'),
1844 [(b'd', b'dist', 100, b'distance between the revisions'),
1845 (b's', b'startrev', 0, b'revision to start reading at'),
1845 (b's', b'startrev', 0, b'revision to start reading at'),
1846 (b'', b'reverse', False, b'read in reverse')],
1846 (b'', b'reverse', False, b'read in reverse')],
1847 b'-c|-m|FILE')
1847 b'-c|-m|FILE')
1848 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1848 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1849 **opts):
1849 **opts):
1850 """Benchmark reading a series of revisions from a revlog.
1850 """Benchmark reading a series of revisions from a revlog.
1851
1851
1852 By default, we read every ``-d/--dist`` revision from 0 to tip of
1852 By default, we read every ``-d/--dist`` revision from 0 to tip of
1853 the specified revlog.
1853 the specified revlog.
1854
1854
1855 The start revision can be defined via ``-s/--startrev``.
1855 The start revision can be defined via ``-s/--startrev``.
1856 """
1856 """
1857 opts = _byteskwargs(opts)
1857 opts = _byteskwargs(opts)
1858
1858
1859 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1859 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1860 rllen = getlen(ui)(rl)
1860 rllen = getlen(ui)(rl)
1861
1861
1862 if startrev < 0:
1862 if startrev < 0:
1863 startrev = rllen + startrev
1863 startrev = rllen + startrev
1864
1864
1865 def d():
1865 def d():
1866 rl.clearcaches()
1866 rl.clearcaches()
1867
1867
1868 beginrev = startrev
1868 beginrev = startrev
1869 endrev = rllen
1869 endrev = rllen
1870 dist = opts[b'dist']
1870 dist = opts[b'dist']
1871
1871
1872 if reverse:
1872 if reverse:
1873 beginrev, endrev = endrev - 1, beginrev - 1
1873 beginrev, endrev = endrev - 1, beginrev - 1
1874 dist = -1 * dist
1874 dist = -1 * dist
1875
1875
1876 for x in _xrange(beginrev, endrev, dist):
1876 for x in _xrange(beginrev, endrev, dist):
1877 # Old revisions don't support passing int.
1877 # Old revisions don't support passing int.
1878 n = rl.node(x)
1878 n = rl.node(x)
1879 rl.revision(n)
1879 rl.revision(n)
1880
1880
1881 timer, fm = gettimer(ui, opts)
1881 timer, fm = gettimer(ui, opts)
1882 timer(d)
1882 timer(d)
1883 fm.end()
1883 fm.end()
1884
1884
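# Self-contained sketch of the stride logic used by perfrevlogrevisions below:
# visit every `dist`-th revision, optionally in reverse by flipping the bounds
# and negating the step.  Plain integers stand in for revisions; the real
# command calls rl.revision() on each one.
def _example_stride(rllen, startrev=0, dist=100, reverse=False):
    beginrev, endrev, step = startrev, rllen, dist
    if reverse:
        beginrev, endrev, step = endrev - 1, beginrev - 1, -dist
    return list(range(beginrev, endrev, step))

# _example_stride(1000, dist=100)               -> [0, 100, ..., 900]
# _example_stride(1000, dist=100, reverse=True) -> [999, 899, ..., 99]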
1885 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1885 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1886 [(b's', b'startrev', 1000, b'revision to start writing at'),
1886 [(b's', b'startrev', 1000, b'revision to start writing at'),
1887 (b'', b'stoprev', -1, b'last revision to write'),
1887 (b'', b'stoprev', -1, b'last revision to write'),
1888 (b'', b'count', 3, b'number of passes to perform'),
1888 (b'', b'count', 3, b'number of passes to perform'),
1889 (b'', b'details', False, b'print timing for every revision tested'),
1889 (b'', b'details', False, b'print timing for every revision tested'),
1890 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1890 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1891 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1891 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1892 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1892 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1893 ],
1893 ],
1894 b'-c|-m|FILE')
1894 b'-c|-m|FILE')
1895 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1895 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1896 """Benchmark writing a series of revisions to a revlog.
1896 """Benchmark writing a series of revisions to a revlog.
1897
1897
1898 Possible source values are:
1898 Possible source values are:
1899 * `full`: add from a full text (default).
1899 * `full`: add from a full text (default).
1900 * `parent-1`: add from a delta to the first parent
1900 * `parent-1`: add from a delta to the first parent
1901 * `parent-2`: add from a delta to the second parent if it exists
1901 * `parent-2`: add from a delta to the second parent if it exists
1902 (use a delta from the first parent otherwise)
1902 (use a delta from the first parent otherwise)
1903 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1903 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1904 * `storage`: add from the existing precomputed deltas
1904 * `storage`: add from the existing precomputed deltas
1905 """
1905 """
1906 opts = _byteskwargs(opts)
1906 opts = _byteskwargs(opts)
1907
1907
1908 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1908 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1909 rllen = getlen(ui)(rl)
1909 rllen = getlen(ui)(rl)
1910 if startrev < 0:
1910 if startrev < 0:
1911 startrev = rllen + startrev
1911 startrev = rllen + startrev
1912 if stoprev < 0:
1912 if stoprev < 0:
1913 stoprev = rllen + stoprev
1913 stoprev = rllen + stoprev
1914
1914
1915 lazydeltabase = opts['lazydeltabase']
1915 lazydeltabase = opts['lazydeltabase']
1916 source = opts['source']
1916 source = opts['source']
1917 clearcaches = opts['clear_caches']
1917 clearcaches = opts['clear_caches']
1918 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1918 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1919 b'storage')
1919 b'storage')
1920 if source not in validsource:
1920 if source not in validsource:
1921 raise error.Abort('invalid source type: %s' % source)
1921 raise error.Abort('invalid source type: %s' % source)
1922
1922
1923 ### actually gather results
1923 ### actually gather results
1924 count = opts['count']
1924 count = opts['count']
1925 if count <= 0:
1925 if count <= 0:
1926 raise error.Abort('invalid run count: %d' % count)
1926 raise error.Abort('invalid run count: %d' % count)
1927 allresults = []
1927 allresults = []
1928 for c in range(count):
1928 for c in range(count):
1929 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1929 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1930 lazydeltabase=lazydeltabase,
1930 lazydeltabase=lazydeltabase,
1931 clearcaches=clearcaches)
1931 clearcaches=clearcaches)
1932 allresults.append(timing)
1932 allresults.append(timing)
1933
1933
1934 ### consolidate the results in a single list
1934 ### consolidate the results in a single list
1935 results = []
1935 results = []
1936 for idx, (rev, t) in enumerate(allresults[0]):
1936 for idx, (rev, t) in enumerate(allresults[0]):
1937 ts = [t]
1937 ts = [t]
1938 for other in allresults[1:]:
1938 for other in allresults[1:]:
1939 orev, ot = other[idx]
1939 orev, ot = other[idx]
1940 assert orev == rev
1940 assert orev == rev
1941 ts.append(ot)
1941 ts.append(ot)
1942 results.append((rev, ts))
1942 results.append((rev, ts))
1943 resultcount = len(results)
1943 resultcount = len(results)
1944
1944
1945 ### Compute and display relevant statistics
1945 ### Compute and display relevant statistics
1946
1946
1947 # get a formatter
1947 # get a formatter
1948 fm = ui.formatter(b'perf', opts)
1948 fm = ui.formatter(b'perf', opts)
1949 displayall = ui.configbool(b"perf", b"all-timing", False)
1949 displayall = ui.configbool(b"perf", b"all-timing", False)
1950
1950
1951 # print individual details if requested
1951 # print individual details if requested
1952 if opts['details']:
1952 if opts['details']:
1953 for idx, item in enumerate(results, 1):
1953 for idx, item in enumerate(results, 1):
1954 rev, data = item
1954 rev, data = item
1955 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1955 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1956 formatone(fm, data, title=title, displayall=displayall)
1956 formatone(fm, data, title=title, displayall=displayall)
1957
1957
1958 # sorts results by median time
1958 # sorts results by median time
1959 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1959 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1960 # list of (name, index) to display
1960 # list of (name, index) to display
1961 relevants = [
1961 relevants = [
1962 ("min", 0),
1962 ("min", 0),
1963 ("10%", resultcount * 10 // 100),
1963 ("10%", resultcount * 10 // 100),
1964 ("25%", resultcount * 25 // 100),
1964 ("25%", resultcount * 25 // 100),
1965 ("50%", resultcount * 70 // 100),
1965 ("50%", resultcount * 70 // 100),
1966 ("75%", resultcount * 75 // 100),
1966 ("75%", resultcount * 75 // 100),
1967 ("90%", resultcount * 90 // 100),
1967 ("90%", resultcount * 90 // 100),
1968 ("95%", resultcount * 95 // 100),
1968 ("95%", resultcount * 95 // 100),
1969 ("99%", resultcount * 99 // 100),
1969 ("99%", resultcount * 99 // 100),
1970 ("99.9%", resultcount * 999 // 1000),
1970 ("99.9%", resultcount * 999 // 1000),
1971 ("99.99%", resultcount * 9999 // 10000),
1971 ("99.99%", resultcount * 9999 // 10000),
1972 ("99.999%", resultcount * 99999 // 100000),
1972 ("99.999%", resultcount * 99999 // 100000),
1973 ("max", -1),
1973 ("max", -1),
1974 ]
1974 ]
1975 if not ui.quiet:
1975 if not ui.quiet:
1976 for name, idx in relevants:
1976 for name, idx in relevants:
1977 data = results[idx]
1977 data = results[idx]
1978 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1978 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1979 formatone(fm, data[1], title=title, displayall=displayall)
1979 formatone(fm, data[1], title=title, displayall=displayall)
1980
1980
1981 # XXX summing that many floats will not be very precise; we ignore this fact
1981 # XXX summing that many floats will not be very precise; we ignore this fact
1982 # for now
1982 # for now
1983 totaltime = []
1983 totaltime = []
1984 for item in allresults:
1984 for item in allresults:
1985 totaltime.append((sum(x[1][0] for x in item),
1985 totaltime.append((sum(x[1][0] for x in item),
1986 sum(x[1][1] for x in item),
1986 sum(x[1][1] for x in item),
1987 sum(x[1][2] for x in item),)
1987 sum(x[1][2] for x in item),)
1988 )
1988 )
1989 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1989 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1990 displayall=displayall)
1990 displayall=displayall)
1991 fm.end()
1991 fm.end()
1992
1992
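# Self-contained sketch of the percentile reporting in perfrevlogwrite above:
# once the per-revision results are sorted by median time, each percentile is
# simply an index into that sorted list (``count * pct // 100``), with -1
# selecting the maximum.
def _example_percentiles(values):
    data = sorted(values)
    n = len(data)
    points = [("min", 0), ("50%", n * 50 // 100),
              ("90%", n * 90 // 100), ("max", -1)]
    return [(name, data[idx]) for name, idx in points]

# _example_percentiles([5, 1, 4, 2, 3]) ->
#     [('min', 1), ('50%', 3), ('90%', 5), ('max', 5)]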
1993 class _faketr(object):
1993 class _faketr(object):
1994 def add(s, x, y, z=None):
1994 def add(s, x, y, z=None):
1995 return None
1995 return None
1996
1996
1997 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1997 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1998 lazydeltabase=True, clearcaches=True):
1998 lazydeltabase=True, clearcaches=True):
1999 timings = []
1999 timings = []
2000 tr = _faketr()
2000 tr = _faketr()
2001 with _temprevlog(ui, orig, startrev) as dest:
2001 with _temprevlog(ui, orig, startrev) as dest:
2002 dest._lazydeltabase = lazydeltabase
2002 dest._lazydeltabase = lazydeltabase
2003 revs = list(orig.revs(startrev, stoprev))
2003 revs = list(orig.revs(startrev, stoprev))
2004 total = len(revs)
2004 total = len(revs)
2005 topic = 'adding'
2005 topic = 'adding'
2006 if runidx is not None:
2006 if runidx is not None:
2007 topic += ' (run #%d)' % runidx
2007 topic += ' (run #%d)' % runidx
2008 # Support both old and new progress API
2008 # Support both old and new progress API
2009 if util.safehasattr(ui, 'makeprogress'):
2009 if util.safehasattr(ui, 'makeprogress'):
2010 progress = ui.makeprogress(topic, unit='revs', total=total)
2010 progress = ui.makeprogress(topic, unit='revs', total=total)
2011 def updateprogress(pos):
2011 def updateprogress(pos):
2012 progress.update(pos)
2012 progress.update(pos)
2013 def completeprogress():
2013 def completeprogress():
2014 progress.complete()
2014 progress.complete()
2015 else:
2015 else:
2016 def updateprogress(pos):
2016 def updateprogress(pos):
2017 ui.progress(topic, pos, unit='revs', total=total)
2017 ui.progress(topic, pos, unit='revs', total=total)
2018 def completeprogress():
2018 def completeprogress():
2019 ui.progress(topic, None, unit='revs', total=total)
2019 ui.progress(topic, None, unit='revs', total=total)
2020
2020
2021 for idx, rev in enumerate(revs):
2021 for idx, rev in enumerate(revs):
2022 updateprogress(idx)
2022 updateprogress(idx)
2023 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2023 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2024 if clearcaches:
2024 if clearcaches:
2025 dest.index.clearcaches()
2025 dest.index.clearcaches()
2026 dest.clearcaches()
2026 dest.clearcaches()
2027 with timeone() as r:
2027 with timeone() as r:
2028 dest.addrawrevision(*addargs, **addkwargs)
2028 dest.addrawrevision(*addargs, **addkwargs)
2029 timings.append((rev, r[0]))
2029 timings.append((rev, r[0]))
2030 updateprogress(total)
2030 updateprogress(total)
2031 completeprogress()
2031 completeprogress()
2032 return timings
2032 return timings
2033
2033
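# Minimal, self-contained analogue of the ``timeone()`` helper used by
# _timeonewrite above: a context manager that yields a one-element list and
# fills it with the elapsed wall-clock time on exit.  The real helper in this
# file also records CPU time, so this only approximates its shape.
import contextlib
import time

@contextlib.contextmanager
def _example_timeone():
    r = [None]
    start = time.time()
    try:
        yield r
    finally:
        r[0] = time.time() - start

# with _example_timeone() as r:
#     sum(range(10 ** 6))
# elapsed = r[0]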
2034 def _getrevisionseed(orig, rev, tr, source):
2034 def _getrevisionseed(orig, rev, tr, source):
2035 from mercurial.node import nullid
2035 from mercurial.node import nullid
2036
2036
2037 linkrev = orig.linkrev(rev)
2037 linkrev = orig.linkrev(rev)
2038 node = orig.node(rev)
2038 node = orig.node(rev)
2039 p1, p2 = orig.parents(node)
2039 p1, p2 = orig.parents(node)
2040 flags = orig.flags(rev)
2040 flags = orig.flags(rev)
2041 cachedelta = None
2041 cachedelta = None
2042 text = None
2042 text = None
2043
2043
2044 if source == b'full':
2044 if source == b'full':
2045 text = orig.revision(rev)
2045 text = orig.revision(rev)
2046 elif source == b'parent-1':
2046 elif source == b'parent-1':
2047 baserev = orig.rev(p1)
2047 baserev = orig.rev(p1)
2048 cachedelta = (baserev, orig.revdiff(p1, rev))
2048 cachedelta = (baserev, orig.revdiff(p1, rev))
2049 elif source == b'parent-2':
2049 elif source == b'parent-2':
2050 parent = p2
2050 parent = p2
2051 if p2 == nullid:
2051 if p2 == nullid:
2052 parent = p1
2052 parent = p1
2053 baserev = orig.rev(parent)
2053 baserev = orig.rev(parent)
2054 cachedelta = (baserev, orig.revdiff(parent, rev))
2054 cachedelta = (baserev, orig.revdiff(parent, rev))
2055 elif source == b'parent-smallest':
2055 elif source == b'parent-smallest':
2056 p1diff = orig.revdiff(p1, rev)
2056 p1diff = orig.revdiff(p1, rev)
2057 parent = p1
2057 parent = p1
2058 diff = p1diff
2058 diff = p1diff
2059 if p2 != nullid:
2059 if p2 != nullid:
2060 p2diff = orig.revdiff(p2, rev)
2060 p2diff = orig.revdiff(p2, rev)
2061 if len(p1diff) > len(p2diff):
2061 if len(p1diff) > len(p2diff):
2062 parent = p2
2062 parent = p2
2063 diff = p2diff
2063 diff = p2diff
2064 baserev = orig.rev(parent)
2064 baserev = orig.rev(parent)
2065 cachedelta = (baserev, diff)
2065 cachedelta = (baserev, diff)
2066 elif source == b'storage':
2066 elif source == b'storage':
2067 baserev = orig.deltaparent(rev)
2067 baserev = orig.deltaparent(rev)
2068 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2068 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2069
2069
2070 return ((text, tr, linkrev, p1, p2),
2070 return ((text, tr, linkrev, p1, p2),
2071 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2071 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2072
2072
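# Self-contained sketch of the `parent-smallest` selection performed by
# _getrevisionseed above: compute a delta against each available parent and
# keep the shorter one.  Byte strings stand in for the revdiff() deltas used
# by the real code.
def _example_smallestparent(p1diff, p2diff=None):
    parent, diff = 'p1', p1diff
    if p2diff is not None and len(p1diff) > len(p2diff):
        parent, diff = 'p2', p2diff
    return parent, diff

# _example_smallestparent(b'x' * 10, b'y' * 3) -> ('p2', b'yyy')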
2073 @contextlib.contextmanager
2073 @contextlib.contextmanager
2074 def _temprevlog(ui, orig, truncaterev):
2074 def _temprevlog(ui, orig, truncaterev):
2075 from mercurial import vfs as vfsmod
2075 from mercurial import vfs as vfsmod
2076
2076
2077 if orig._inline:
2077 if orig._inline:
2078 raise error.Abort('not supporting inline revlog (yet)')
2078 raise error.Abort('not supporting inline revlog (yet)')
2079
2079
2080 origindexpath = orig.opener.join(orig.indexfile)
2080 origindexpath = orig.opener.join(orig.indexfile)
2081 origdatapath = orig.opener.join(orig.datafile)
2081 origdatapath = orig.opener.join(orig.datafile)
2082 indexname = 'revlog.i'
2082 indexname = 'revlog.i'
2083 dataname = 'revlog.d'
2083 dataname = 'revlog.d'
2084
2084
2085 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2085 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2086 try:
2086 try:
2087 # copy the data file in a temporary directory
2087 # copy the data file in a temporary directory
2088 ui.debug('copying data in %s\n' % tmpdir)
2088 ui.debug('copying data in %s\n' % tmpdir)
2089 destindexpath = os.path.join(tmpdir, 'revlog.i')
2089 destindexpath = os.path.join(tmpdir, 'revlog.i')
2090 destdatapath = os.path.join(tmpdir, 'revlog.d')
2090 destdatapath = os.path.join(tmpdir, 'revlog.d')
2091 shutil.copyfile(origindexpath, destindexpath)
2091 shutil.copyfile(origindexpath, destindexpath)
2092 shutil.copyfile(origdatapath, destdatapath)
2092 shutil.copyfile(origdatapath, destdatapath)
2093
2093
2094 # remove the data we want to add again
2094 # remove the data we want to add again
2095 ui.debug('truncating data to be rewritten\n')
2095 ui.debug('truncating data to be rewritten\n')
2096 with open(destindexpath, 'ab') as index:
2096 with open(destindexpath, 'ab') as index:
2097 index.seek(0)
2097 index.seek(0)
2098 index.truncate(truncaterev * orig._io.size)
2098 index.truncate(truncaterev * orig._io.size)
2099 with open(destdatapath, 'ab') as data:
2099 with open(destdatapath, 'ab') as data:
2100 data.seek(0)
2100 data.seek(0)
2101 data.truncate(orig.start(truncaterev))
2101 data.truncate(orig.start(truncaterev))
2102
2102
2103 # instantiate a new revlog from the temporary copy
2103 # instantiate a new revlog from the temporary copy
2104 ui.debug('instantiating revlog from the truncated copy\n')
2104 ui.debug('instantiating revlog from the truncated copy\n')
2105 vfs = vfsmod.vfs(tmpdir)
2105 vfs = vfsmod.vfs(tmpdir)
2106 vfs.options = getattr(orig.opener, 'options', None)
2106 vfs.options = getattr(orig.opener, 'options', None)
2107
2107
2108 dest = revlog.revlog(vfs,
2108 dest = revlog.revlog(vfs,
2109 indexfile=indexname,
2109 indexfile=indexname,
2110 datafile=dataname)
2110 datafile=dataname)
2111 if dest._inline:
2111 if dest._inline:
2112 raise error.Abort('not supporting inline revlog (yet)')
2112 raise error.Abort('not supporting inline revlog (yet)')
2113 # make sure internals are initialized
2113 # make sure internals are initialized
2114 dest.revision(len(dest) - 1)
2114 dest.revision(len(dest) - 1)
2115 yield dest
2115 yield dest
2116 del dest, vfs
2116 del dest, vfs
2117 finally:
2117 finally:
2118 shutil.rmtree(tmpdir, True)
2118 shutil.rmtree(tmpdir, True)
2119
2119
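# Self-contained sketch of the copy/truncate/clean-up pattern _temprevlog
# implements above, using an ordinary file instead of a revlog: copy the
# source into a scratch directory, truncate the copy back to a given offset,
# hand it to the caller, and always remove the scratch directory afterwards.
import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def _example_tempcopy(path, truncateto):
    tmpdir = tempfile.mkdtemp(prefix='tmp-example-')
    try:
        dest = os.path.join(tmpdir, os.path.basename(path))
        shutil.copyfile(path, dest)
        with open(dest, 'ab') as fh:
            fh.truncate(truncateto)
        yield dest
    finally:
        shutil.rmtree(tmpdir, True)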
2120 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2120 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2121 [(b'e', b'engines', b'', b'compression engines to use'),
2121 [(b'e', b'engines', b'', b'compression engines to use'),
2122 (b's', b'startrev', 0, b'revision to start at')],
2122 (b's', b'startrev', 0, b'revision to start at')],
2123 b'-c|-m|FILE')
2123 b'-c|-m|FILE')
2124 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2124 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2125 """Benchmark operations on revlog chunks.
2125 """Benchmark operations on revlog chunks.
2126
2126
2127 Logically, each revlog is a collection of fulltext revisions. However,
2127 Logically, each revlog is a collection of fulltext revisions. However,
2128 stored within each revlog are "chunks" of possibly compressed data. This
2128 stored within each revlog are "chunks" of possibly compressed data. This
2129 data needs to be read and decompressed or compressed and written.
2129 data needs to be read and decompressed or compressed and written.
2130
2130
2131 This command measures the time it takes to read+decompress and recompress
2131 This command measures the time it takes to read+decompress and recompress
2132 chunks in a revlog. It effectively isolates I/O and compression performance.
2132 chunks in a revlog. It effectively isolates I/O and compression performance.
2133 For measurements of higher-level operations like resolving revisions,
2133 For measurements of higher-level operations like resolving revisions,
2134 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2134 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2135 """
2135 """
2136 opts = _byteskwargs(opts)
2136 opts = _byteskwargs(opts)
2137
2137
2138 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2138 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2139
2139
2140 # _chunkraw was renamed to _getsegmentforrevs.
2140 # _chunkraw was renamed to _getsegmentforrevs.
2141 try:
2141 try:
2142 segmentforrevs = rl._getsegmentforrevs
2142 segmentforrevs = rl._getsegmentforrevs
2143 except AttributeError:
2143 except AttributeError:
2144 segmentforrevs = rl._chunkraw
2144 segmentforrevs = rl._chunkraw
2145
2145
2146 # Verify engines argument.
2146 # Verify engines argument.
2147 if engines:
2147 if engines:
2148 engines = set(e.strip() for e in engines.split(b','))
2148 engines = set(e.strip() for e in engines.split(b','))
2149 for engine in engines:
2149 for engine in engines:
2150 try:
2150 try:
2151 util.compressionengines[engine]
2151 util.compressionengines[engine]
2152 except KeyError:
2152 except KeyError:
2153 raise error.Abort(b'unknown compression engine: %s' % engine)
2153 raise error.Abort(b'unknown compression engine: %s' % engine)
2154 else:
2154 else:
2155 engines = []
2155 engines = []
2156 for e in util.compengines:
2156 for e in util.compengines:
2157 engine = util.compengines[e]
2157 engine = util.compengines[e]
2158 try:
2158 try:
2159 if engine.available():
2159 if engine.available():
2160 engine.revlogcompressor().compress(b'dummy')
2160 engine.revlogcompressor().compress(b'dummy')
2161 engines.append(e)
2161 engines.append(e)
2162 except NotImplementedError:
2162 except NotImplementedError:
2163 pass
2163 pass
2164
2164
2165 revs = list(rl.revs(startrev, len(rl) - 1))
2165 revs = list(rl.revs(startrev, len(rl) - 1))
2166
2166
2167 def rlfh(rl):
2167 def rlfh(rl):
2168 if rl._inline:
2168 if rl._inline:
2169 return getsvfs(repo)(rl.indexfile)
2169 return getsvfs(repo)(rl.indexfile)
2170 else:
2170 else:
2171 return getsvfs(repo)(rl.datafile)
2171 return getsvfs(repo)(rl.datafile)
2172
2172
2173 def doread():
2173 def doread():
2174 rl.clearcaches()
2174 rl.clearcaches()
2175 for rev in revs:
2175 for rev in revs:
2176 segmentforrevs(rev, rev)
2176 segmentforrevs(rev, rev)
2177
2177
2178 def doreadcachedfh():
2178 def doreadcachedfh():
2179 rl.clearcaches()
2179 rl.clearcaches()
2180 fh = rlfh(rl)
2180 fh = rlfh(rl)
2181 for rev in revs:
2181 for rev in revs:
2182 segmentforrevs(rev, rev, df=fh)
2182 segmentforrevs(rev, rev, df=fh)
2183
2183
2184 def doreadbatch():
2184 def doreadbatch():
2185 rl.clearcaches()
2185 rl.clearcaches()
2186 segmentforrevs(revs[0], revs[-1])
2186 segmentforrevs(revs[0], revs[-1])
2187
2187
2188 def doreadbatchcachedfh():
2188 def doreadbatchcachedfh():
2189 rl.clearcaches()
2189 rl.clearcaches()
2190 fh = rlfh(rl)
2190 fh = rlfh(rl)
2191 segmentforrevs(revs[0], revs[-1], df=fh)
2191 segmentforrevs(revs[0], revs[-1], df=fh)
2192
2192
2193 def dochunk():
2193 def dochunk():
2194 rl.clearcaches()
2194 rl.clearcaches()
2195 fh = rlfh(rl)
2195 fh = rlfh(rl)
2196 for rev in revs:
2196 for rev in revs:
2197 rl._chunk(rev, df=fh)
2197 rl._chunk(rev, df=fh)
2198
2198
2199 chunks = [None]
2199 chunks = [None]
2200
2200
2201 def dochunkbatch():
2201 def dochunkbatch():
2202 rl.clearcaches()
2202 rl.clearcaches()
2203 fh = rlfh(rl)
2203 fh = rlfh(rl)
2204 # Save chunks as a side-effect.
2204 # Save chunks as a side-effect.
2205 chunks[0] = rl._chunks(revs, df=fh)
2205 chunks[0] = rl._chunks(revs, df=fh)
2206
2206
2207 def docompress(compressor):
2207 def docompress(compressor):
2208 rl.clearcaches()
2208 rl.clearcaches()
2209
2209
2210 try:
2210 try:
2211 # Swap in the requested compression engine.
2211 # Swap in the requested compression engine.
2212 oldcompressor = rl._compressor
2212 oldcompressor = rl._compressor
2213 rl._compressor = compressor
2213 rl._compressor = compressor
2214 for chunk in chunks[0]:
2214 for chunk in chunks[0]:
2215 rl.compress(chunk)
2215 rl.compress(chunk)
2216 finally:
2216 finally:
2217 rl._compressor = oldcompressor
2217 rl._compressor = oldcompressor
2218
2218
2219 benches = [
2219 benches = [
2220 (lambda: doread(), b'read'),
2220 (lambda: doread(), b'read'),
2221 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2221 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2222 (lambda: doreadbatch(), b'read batch'),
2222 (lambda: doreadbatch(), b'read batch'),
2223 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2223 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2224 (lambda: dochunk(), b'chunk'),
2224 (lambda: dochunk(), b'chunk'),
2225 (lambda: dochunkbatch(), b'chunk batch'),
2225 (lambda: dochunkbatch(), b'chunk batch'),
2226 ]
2226 ]
2227
2227
2228 for engine in sorted(engines):
2228 for engine in sorted(engines):
2229 compressor = util.compengines[engine].revlogcompressor()
2229 compressor = util.compengines[engine].revlogcompressor()
2230 benches.append((functools.partial(docompress, compressor),
2230 benches.append((functools.partial(docompress, compressor),
2231 b'compress w/ %s' % engine))
2231 b'compress w/ %s' % engine))
2232
2232
2233 for fn, title in benches:
2233 for fn, title in benches:
2234 timer, fm = gettimer(ui, opts)
2234 timer, fm = gettimer(ui, opts)
2235 timer(fn, title=title)
2235 timer(fn, title=title)
2236 fm.end()
2236 fm.end()
2237
2237
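# Self-contained illustration of the compression benchmark idea in
# perfrevlogchunks above, using standard-library codecs as stand-ins for
# Mercurial's pluggable engines: compress the same chunks with each engine
# and compare the output sizes.  Real revlog compressors go through
# engine.revlogcompressor().compress() instead.
def _example_compresschunks(chunks):
    import bz2
    import zlib

    engines = {'zlib': zlib.compress, 'bz2': bz2.compress}
    sizes = {}
    for name, compress in sorted(engines.items()):
        sizes[name] = sum(len(compress(chunk)) for chunk in chunks)
    return sizes

# _example_compresschunks([b'some revlog chunk' * 100, b'another one' * 50])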
2238 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2238 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2239 [(b'', b'cache', False, b'use caches instead of clearing')],
2239 [(b'', b'cache', False, b'use caches instead of clearing')],
2240 b'-c|-m|FILE REV')
2240 b'-c|-m|FILE REV')
2241 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2241 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2242 """Benchmark obtaining a revlog revision.
2242 """Benchmark obtaining a revlog revision.
2243
2243
2244 Obtaining a revlog revision consists of roughly the following steps:
2244 Obtaining a revlog revision consists of roughly the following steps:
2245
2245
2246 1. Compute the delta chain
2246 1. Compute the delta chain
2247 2. Slice the delta chain if applicable
2247 2. Slice the delta chain if applicable
2248 3. Obtain the raw chunks for that delta chain
2248 3. Obtain the raw chunks for that delta chain
2249 4. Decompress each raw chunk
2249 4. Decompress each raw chunk
2250 5. Apply binary patches to obtain fulltext
2250 5. Apply binary patches to obtain fulltext
2251 6. Verify hash of fulltext
2251 6. Verify hash of fulltext
2252
2252
2253 This command measures the time spent in each of these phases.
2253 This command measures the time spent in each of these phases.
2254 """
2254 """
2255 opts = _byteskwargs(opts)
2255 opts = _byteskwargs(opts)
2256
2256
2257 if opts.get(b'changelog') or opts.get(b'manifest'):
2257 if opts.get(b'changelog') or opts.get(b'manifest'):
2258 file_, rev = None, file_
2258 file_, rev = None, file_
2259 elif rev is None:
2259 elif rev is None:
2260 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2260 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2261
2261
2262 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2262 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2263
2263
2264 # _chunkraw was renamed to _getsegmentforrevs.
2264 # _chunkraw was renamed to _getsegmentforrevs.
2265 try:
2265 try:
2266 segmentforrevs = r._getsegmentforrevs
2266 segmentforrevs = r._getsegmentforrevs
2267 except AttributeError:
2267 except AttributeError:
2268 segmentforrevs = r._chunkraw
2268 segmentforrevs = r._chunkraw
2269
2269
2270 node = r.lookup(rev)
2270 node = r.lookup(rev)
2271 rev = r.rev(node)
2271 rev = r.rev(node)
2272
2272
2273 def getrawchunks(data, chain):
2273 def getrawchunks(data, chain):
2274 start = r.start
2274 start = r.start
2275 length = r.length
2275 length = r.length
2276 inline = r._inline
2276 inline = r._inline
2277 iosize = r._io.size
2277 iosize = r._io.size
2278 buffer = util.buffer
2278 buffer = util.buffer
2279
2279
2280 chunks = []
2280 chunks = []
2281 ladd = chunks.append
2281 ladd = chunks.append
2282 for idx, item in enumerate(chain):
2282 for idx, item in enumerate(chain):
2283 offset = start(item[0])
2283 offset = start(item[0])
2284 bits = data[idx]
2284 bits = data[idx]
2285 for rev in item:
2285 for rev in item:
2286 chunkstart = start(rev)
2286 chunkstart = start(rev)
2287 if inline:
2287 if inline:
2288 chunkstart += (rev + 1) * iosize
2288 chunkstart += (rev + 1) * iosize
2289 chunklength = length(rev)
2289 chunklength = length(rev)
2290 ladd(buffer(bits, chunkstart - offset, chunklength))
2290 ladd(buffer(bits, chunkstart - offset, chunklength))
2291
2291
2292 return chunks
2292 return chunks
2293
2293
2294 def dodeltachain(rev):
2294 def dodeltachain(rev):
2295 if not cache:
2295 if not cache:
2296 r.clearcaches()
2296 r.clearcaches()
2297 r._deltachain(rev)
2297 r._deltachain(rev)
2298
2298
2299 def doread(chain):
2299 def doread(chain):
2300 if not cache:
2300 if not cache:
2301 r.clearcaches()
2301 r.clearcaches()
2302 for item in slicedchain:
2302 for item in slicedchain:
2303 segmentforrevs(item[0], item[-1])
2303 segmentforrevs(item[0], item[-1])
2304
2304
2305 def doslice(r, chain, size):
2305 def doslice(r, chain, size):
2306 for s in slicechunk(r, chain, targetsize=size):
2306 for s in slicechunk(r, chain, targetsize=size):
2307 pass
2307 pass
2308
2308
2309 def dorawchunks(data, chain):
2309 def dorawchunks(data, chain):
2310 if not cache:
2310 if not cache:
2311 r.clearcaches()
2311 r.clearcaches()
2312 getrawchunks(data, chain)
2312 getrawchunks(data, chain)
2313
2313
2314 def dodecompress(chunks):
2314 def dodecompress(chunks):
2315 decomp = r.decompress
2315 decomp = r.decompress
2316 for chunk in chunks:
2316 for chunk in chunks:
2317 decomp(chunk)
2317 decomp(chunk)
2318
2318
2319 def dopatch(text, bins):
2319 def dopatch(text, bins):
2320 if not cache:
2320 if not cache:
2321 r.clearcaches()
2321 r.clearcaches()
2322 mdiff.patches(text, bins)
2322 mdiff.patches(text, bins)
2323
2323
2324 def dohash(text):
2324 def dohash(text):
2325 if not cache:
2325 if not cache:
2326 r.clearcaches()
2326 r.clearcaches()
2327 r.checkhash(text, node, rev=rev)
2327 r.checkhash(text, node, rev=rev)
2328
2328
2329 def dorevision():
2329 def dorevision():
2330 if not cache:
2330 if not cache:
2331 r.clearcaches()
2331 r.clearcaches()
2332 r.revision(node)
2332 r.revision(node)
2333
2333
2334 try:
2334 try:
2335 from mercurial.revlogutils.deltas import slicechunk
2335 from mercurial.revlogutils.deltas import slicechunk
2336 except ImportError:
2336 except ImportError:
2337 slicechunk = getattr(revlog, '_slicechunk', None)
2337 slicechunk = getattr(revlog, '_slicechunk', None)
2338
2338
2339 size = r.length(rev)
2339 size = r.length(rev)
2340 chain = r._deltachain(rev)[0]
2340 chain = r._deltachain(rev)[0]
2341 if not getattr(r, '_withsparseread', False):
2341 if not getattr(r, '_withsparseread', False):
2342 slicedchain = (chain,)
2342 slicedchain = (chain,)
2343 else:
2343 else:
2344 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2344 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2345 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2345 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2346 rawchunks = getrawchunks(data, slicedchain)
2346 rawchunks = getrawchunks(data, slicedchain)
2347 bins = r._chunks(chain)
2347 bins = r._chunks(chain)
2348 text = bytes(bins[0])
2348 text = bytes(bins[0])
2349 bins = bins[1:]
2349 bins = bins[1:]
2350 text = mdiff.patches(text, bins)
2350 text = mdiff.patches(text, bins)
2351
2351
2352 benches = [
2352 benches = [
2353 (lambda: dorevision(), b'full'),
2353 (lambda: dorevision(), b'full'),
2354 (lambda: dodeltachain(rev), b'deltachain'),
2354 (lambda: dodeltachain(rev), b'deltachain'),
2355 (lambda: doread(chain), b'read'),
2355 (lambda: doread(chain), b'read'),
2356 ]
2356 ]
2357
2357
2358 if getattr(r, '_withsparseread', False):
2358 if getattr(r, '_withsparseread', False):
2359 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2359 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2360 benches.append(slicing)
2360 benches.append(slicing)
2361
2361
2362 benches.extend([
2362 benches.extend([
2363 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2363 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2364 (lambda: dodecompress(rawchunks), b'decompress'),
2364 (lambda: dodecompress(rawchunks), b'decompress'),
2365 (lambda: dopatch(text, bins), b'patch'),
2365 (lambda: dopatch(text, bins), b'patch'),
2366 (lambda: dohash(text), b'hash'),
2366 (lambda: dohash(text), b'hash'),
2367 ])
2367 ])
2368
2368
2369 timer, fm = gettimer(ui, opts)
2369 timer, fm = gettimer(ui, opts)
2370 for fn, title in benches:
2370 for fn, title in benches:
2371 timer(fn, title=title)
2371 timer(fn, title=title)
2372 fm.end()
2372 fm.end()
2373
2373
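# Toy, self-contained analogue of the phases benchmarked by perfrevlogrevision
# above: walk a delta chain back to a full snapshot, "patch" the base text,
# and verify a hash of the result.  Dicts and plain concatenation stand in for
# the revlog index, mdiff patching, and node hashing, and slicing (step 2) is
# skipped entirely, so this only mirrors the control flow, not the real
# formats.
def _example_rebuild(fulltexts, deltas, rev):
    import hashlib

    # 1. compute the delta chain back to the nearest full snapshot
    chain = []
    cur = rev
    while cur not in fulltexts:
        chain.append(cur)
        cur = deltas[cur][0]          # base revision the delta applies to
    # 3-5. fetch the chunks and apply them onto the base text
    text = fulltexts[cur]
    for r in reversed(chain):
        text = text + deltas[r][1]
    # 6. check the reconstructed fulltext against a recorded digest
    return text, hashlib.sha1(text).hexdigest()

# _example_rebuild({0: b'base'}, {1: (0, b'+more'), 2: (1, b'+tail')}, 2)
# -> (b'base+more+tail', '<sha1 hex digest>')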
2374 @command(b'perfrevset',
2374 @command(b'perfrevset',
2375 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2375 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2376 (b'', b'contexts', False, b'obtain changectx for each revision')]
2376 (b'', b'contexts', False, b'obtain changectx for each revision')]
2377 + formatteropts, b"REVSET")
2377 + formatteropts, b"REVSET")
2378 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2378 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2379 """benchmark the execution time of a revset
2379 """benchmark the execution time of a revset
2380
2380
2381 Use the --clear option if you need to evaluate the impact of building the
2381 Use the --clear option if you need to evaluate the impact of building the
2382 volatile revision set caches on revset execution. The volatile caches hold
2382 volatile revision set caches on revset execution. The volatile caches hold
2383 filtering and obsolescence related data."""
2383 filtering and obsolescence related data."""
2384 opts = _byteskwargs(opts)
2384 opts = _byteskwargs(opts)
2385
2385
2386 timer, fm = gettimer(ui, opts)
2386 timer, fm = gettimer(ui, opts)
2387 def d():
2387 def d():
2388 if clear:
2388 if clear:
2389 repo.invalidatevolatilesets()
2389 repo.invalidatevolatilesets()
2390 if contexts:
2390 if contexts:
2391 for ctx in repo.set(expr): pass
2391 for ctx in repo.set(expr): pass
2392 else:
2392 else:
2393 for r in repo.revs(expr): pass
2393 for r in repo.revs(expr): pass
2394 timer(d)
2394 timer(d)
2395 fm.end()
2395 fm.end()
2396
2396
2397 @command(b'perfvolatilesets',
2397 @command(b'perfvolatilesets',
2398 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2398 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2399 ] + formatteropts)
2399 ] + formatteropts)
2400 def perfvolatilesets(ui, repo, *names, **opts):
2400 def perfvolatilesets(ui, repo, *names, **opts):
2401 """benchmark the computation of various volatile set
2401 """benchmark the computation of various volatile set
2402
2402
2403 Volatile set computes element related to filtering and obsolescence."""
2403 Volatile set computes element related to filtering and obsolescence."""
2404 opts = _byteskwargs(opts)
2404 opts = _byteskwargs(opts)
2405 timer, fm = gettimer(ui, opts)
2405 timer, fm = gettimer(ui, opts)
2406 repo = repo.unfiltered()
2406 repo = repo.unfiltered()
2407
2407
2408 def getobs(name):
2408 def getobs(name):
2409 def d():
2409 def d():
2410 repo.invalidatevolatilesets()
2410 repo.invalidatevolatilesets()
2411 if opts[b'clear_obsstore']:
2411 if opts[b'clear_obsstore']:
2412 clearfilecache(repo, b'obsstore')
2412 clearfilecache(repo, b'obsstore')
2413 obsolete.getrevs(repo, name)
2413 obsolete.getrevs(repo, name)
2414 return d
2414 return d
2415
2415
2416 allobs = sorted(obsolete.cachefuncs)
2416 allobs = sorted(obsolete.cachefuncs)
2417 if names:
2417 if names:
2418 allobs = [n for n in allobs if n in names]
2418 allobs = [n for n in allobs if n in names]
2419
2419
2420 for name in allobs:
2420 for name in allobs:
2421 timer(getobs(name), title=name)
2421 timer(getobs(name), title=name)
2422
2422
2423 def getfiltered(name):
2423 def getfiltered(name):
2424 def d():
2424 def d():
2425 repo.invalidatevolatilesets()
2425 repo.invalidatevolatilesets()
2426 if opts[b'clear_obsstore']:
2426 if opts[b'clear_obsstore']:
2427 clearfilecache(repo, b'obsstore')
2427 clearfilecache(repo, b'obsstore')
2428 repoview.filterrevs(repo, name)
2428 repoview.filterrevs(repo, name)
2429 return d
2429 return d
2430
2430
2431 allfilter = sorted(repoview.filtertable)
2431 allfilter = sorted(repoview.filtertable)
2432 if names:
2432 if names:
2433 allfilter = [n for n in allfilter if n in names]
2433 allfilter = [n for n in allfilter if n in names]
2434
2434
2435 for name in allfilter:
2435 for name in allfilter:
2436 timer(getfiltered(name), title=name)
2436 timer(getfiltered(name), title=name)
2437 fm.end()
2437 fm.end()
2438
2438
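# Generic sketch of the closure-factory pattern used by getobs()/getfiltered()
# in perfvolatilesets above: build one zero-argument callable per named
# computation so each can be handed to the timer separately while still
# capturing its name.
def _example_benchfactory(computations):
    def makebench(name, func):
        def bench():
            return func(name)
        return bench
    return [(name, makebench(name, func)) for name, func in computations]

# benches = _example_benchfactory([(b'obsolete', len), (b'unstable', len)])
# for name, bench in benches:
#     bench()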
2439 @command(b'perfbranchmap',
2439 @command(b'perfbranchmap',
2440 [(b'f', b'full', False,
2440 [(b'f', b'full', False,
2441 b'Includes build time of subset'),
2441 b'Includes build time of subset'),
2442 (b'', b'clear-revbranch', False,
2442 (b'', b'clear-revbranch', False,
2443 b'purge the revbranch cache between computation'),
2443 b'purge the revbranch cache between computation'),
2444 ] + formatteropts)
2444 ] + formatteropts)
2445 def perfbranchmap(ui, repo, *filternames, **opts):
2445 def perfbranchmap(ui, repo, *filternames, **opts):
2446 """benchmark the update of a branchmap
2446 """benchmark the update of a branchmap
2447
2447
2448 This benchmarks the full repo.branchmap() call with read and write disabled
2448 This benchmarks the full repo.branchmap() call with read and write disabled
2449 """
2449 """
2450 opts = _byteskwargs(opts)
2450 opts = _byteskwargs(opts)
2451 full = opts.get(b"full", False)
2451 full = opts.get(b"full", False)
2452 clear_revbranch = opts.get(b"clear_revbranch", False)
2452 clear_revbranch = opts.get(b"clear_revbranch", False)
2453 timer, fm = gettimer(ui, opts)
2453 timer, fm = gettimer(ui, opts)
2454 def getbranchmap(filtername):
2454 def getbranchmap(filtername):
2455 """generate a benchmark function for the filtername"""
2455 """generate a benchmark function for the filtername"""
2456 if filtername is None:
2456 if filtername is None:
2457 view = repo
2457 view = repo
2458 else:
2458 else:
2459 view = repo.filtered(filtername)
2459 view = repo.filtered(filtername)
2460 if util.safehasattr(view._branchcaches, '_per_filter'):
2460 if util.safehasattr(view._branchcaches, '_per_filter'):
2461 filtered = view._branchcaches._per_filter
2461 filtered = view._branchcaches._per_filter
2462 else:
2462 else:
2463 # older versions
2463 # older versions
2464 filtered = view._branchcaches
2464 filtered = view._branchcaches
2465 def d():
2465 def d():
2466 if clear_revbranch:
2466 if clear_revbranch:
2467 repo.revbranchcache()._clear()
2467 repo.revbranchcache()._clear()
2468 if full:
2468 if full:
2469 view._branchcaches.clear()
2469 view._branchcaches.clear()
2470 else:
2470 else:
2471 filtered.pop(filtername, None)
2471 filtered.pop(filtername, None)
2472 view.branchmap()
2472 view.branchmap()
2473 return d
2473 return d
2474 # order filters from smaller subsets to bigger subsets
2474 # order filters from smaller subsets to bigger subsets
2475 possiblefilters = set(repoview.filtertable)
2475 possiblefilters = set(repoview.filtertable)
2476 if filternames:
2476 if filternames:
2477 possiblefilters &= set(filternames)
2477 possiblefilters &= set(filternames)
2478 subsettable = getbranchmapsubsettable()
2478 subsettable = getbranchmapsubsettable()
2479 allfilters = []
2479 allfilters = []
2480 while possiblefilters:
2480 while possiblefilters:
2481 for name in possiblefilters:
2481 for name in possiblefilters:
2482 subset = subsettable.get(name)
2482 subset = subsettable.get(name)
2483 if subset not in possiblefilters:
2483 if subset not in possiblefilters:
2484 break
2484 break
2485 else:
2485 else:
2486 assert False, b'subset cycle %s!' % possiblefilters
2486 assert False, b'subset cycle %s!' % possiblefilters
2487 allfilters.append(name)
2487 allfilters.append(name)
2488 possiblefilters.remove(name)
2488 possiblefilters.remove(name)
2489
2489
2490 # warm the cache
2490 # warm the cache
2491 if not full:
2491 if not full:
2492 for name in allfilters:
2492 for name in allfilters:
2493 repo.filtered(name).branchmap()
2493 repo.filtered(name).branchmap()
2494 if not filternames or b'unfiltered' in filternames:
2494 if not filternames or b'unfiltered' in filternames:
2495 # add unfiltered
2495 # add unfiltered
2496 allfilters.append(None)
2496 allfilters.append(None)
2497
2497
2498 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2498 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2499 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2499 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2500 branchcacheread.set(classmethod(lambda *args: None))
2500 branchcacheread.set(classmethod(lambda *args: None))
2501 else:
2501 else:
2502 # older versions
2502 # older versions
2503 branchcacheread = safeattrsetter(branchmap, b'read')
2503 branchcacheread = safeattrsetter(branchmap, b'read')
2504 branchcacheread.set(lambda *args: None)
2504 branchcacheread.set(lambda *args: None)
2505 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2505 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2506 branchcachewrite.set(lambda *args: None)
2506 branchcachewrite.set(lambda *args: None)
2507 try:
2507 try:
2508 for name in allfilters:
2508 for name in allfilters:
2509 printname = name
2509 printname = name
2510 if name is None:
2510 if name is None:
2511 printname = b'unfiltered'
2511 printname = b'unfiltered'
2512 timer(getbranchmap(name), title=str(printname))
2512 timer(getbranchmap(name), title=str(printname))
2513 finally:
2513 finally:
2514 branchcacheread.restore()
2514 branchcacheread.restore()
2515 branchcachewrite.restore()
2515 branchcachewrite.restore()
2516 fm.end()
2516 fm.end()
2517
2517
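# Self-contained sketch of the ordering loop in perfbranchmap above:
# repeatedly pick a filter whose declared subset has already been handled (or
# that has none), so smaller subsets are benchmarked before the supersets
# built on top of them; the for/else arm catches accidental cycles in the
# subset table.  The names in the usage comment are placeholders, not
# necessarily real repoview filter names.
def _example_orderfilters(possible, subsettable):
    possible = set(possible)
    ordered = []
    while possible:
        for name in possible:
            if subsettable.get(name) not in possible:
                break
        else:
            raise RuntimeError('subset cycle: %r' % possible)
        ordered.append(name)
        possible.remove(name)
    return ordered

# _example_orderfilters({'a', 'b', 'c'}, {'c': 'b', 'b': 'a'})
# -> ['a', 'b', 'c']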
2518 @command(b'perfbranchmapupdate', [
2518 @command(b'perfbranchmapupdate', [
2519 (b'', b'base', [], b'subset of revision to start from'),
2519 (b'', b'base', [], b'subset of revision to start from'),
2520 (b'', b'target', [], b'subset of revision to end with'),
2520 (b'', b'target', [], b'subset of revision to end with'),
2521 (b'', b'clear-caches', False, b'clear caches between each run')
2521 (b'', b'clear-caches', False, b'clear caches between each run')
2522 ] + formatteropts)
2522 ] + formatteropts)
2523 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2523 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2524 """benchmark branchmap update from for <base> revs to <target> revs
2524 """benchmark branchmap update from for <base> revs to <target> revs
2525
2525
2526 If `--clear-caches` is passed, the following items will be reset before
2526 If `--clear-caches` is passed, the following items will be reset before
2527 each update:
2527 each update:
2528 * the changelog instance and associated indexes
2528 * the changelog instance and associated indexes
2529 * the rev-branch-cache instance
2529 * the rev-branch-cache instance
2530
2530
2531 Examples:
2531 Examples:
2532
2532
2533 # update for the one last revision
2533 # update for the one last revision
2534 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2534 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2535
2535
2536 # update for a change coming with a new branch
2536 # update for a change coming with a new branch
2537 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2537 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2538 """
2538 """
2539 from mercurial import branchmap
2539 from mercurial import branchmap
2540 from mercurial import repoview
2540 from mercurial import repoview
2541 opts = _byteskwargs(opts)
2541 opts = _byteskwargs(opts)
2542 timer, fm = gettimer(ui, opts)
2542 timer, fm = gettimer(ui, opts)
2543 clearcaches = opts[b'clear_caches']
2543 clearcaches = opts[b'clear_caches']
2544 unfi = repo.unfiltered()
2544 unfi = repo.unfiltered()
2545 x = [None] # used to pass data between closures
2545 x = [None] # used to pass data between closures
2546
2546
2547 # we use a `list` here to avoid possible side effect from smartset
2547 # we use a `list` here to avoid possible side effect from smartset
2548 baserevs = list(scmutil.revrange(repo, base))
2548 baserevs = list(scmutil.revrange(repo, base))
2549 targetrevs = list(scmutil.revrange(repo, target))
2549 targetrevs = list(scmutil.revrange(repo, target))
2550 if not baserevs:
2550 if not baserevs:
2551 raise error.Abort(b'no revisions selected for --base')
2551 raise error.Abort(b'no revisions selected for --base')
2552 if not targetrevs:
2552 if not targetrevs:
2553 raise error.Abort(b'no revisions selected for --target')
2553 raise error.Abort(b'no revisions selected for --target')
2554
2554
2555 # make sure the target branchmap also contains the one in the base
2555 # make sure the target branchmap also contains the one in the base
2556 targetrevs = list(set(baserevs) | set(targetrevs))
2556 targetrevs = list(set(baserevs) | set(targetrevs))
2557 targetrevs.sort()
2557 targetrevs.sort()
2558
2558
2559 cl = repo.changelog
2559 cl = repo.changelog
2560 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2560 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2561 allbaserevs.sort()
2561 allbaserevs.sort()
2562 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2562 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2563
2563
2564 newrevs = list(alltargetrevs.difference(allbaserevs))
2564 newrevs = list(alltargetrevs.difference(allbaserevs))
2565 newrevs.sort()
2565 newrevs.sort()
2566
2566
2567 allrevs = frozenset(unfi.changelog.revs())
2567 allrevs = frozenset(unfi.changelog.revs())
2568 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2568 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2569 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2569 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2570
2570
2571 def basefilter(repo, visibilityexceptions=None):
2571 def basefilter(repo, visibilityexceptions=None):
2572 return basefilterrevs
2572 return basefilterrevs
2573
2573
2574 def targetfilter(repo, visibilityexceptions=None):
2574 def targetfilter(repo, visibilityexceptions=None):
2575 return targetfilterrevs
2575 return targetfilterrevs
2576
2576
2577 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2577 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2578 ui.status(msg % (len(allbaserevs), len(newrevs)))
2578 ui.status(msg % (len(allbaserevs), len(newrevs)))
2579 if targetfilterrevs:
2579 if targetfilterrevs:
2580 msg = b'(%d revisions still filtered)\n'
2580 msg = b'(%d revisions still filtered)\n'
2581 ui.status(msg % len(targetfilterrevs))
2581 ui.status(msg % len(targetfilterrevs))
2582
2582
2583 try:
2583 try:
2584 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2584 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2585 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2585 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2586
2586
2587 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2587 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2588 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2588 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2589
2589
2590 # try to find an existing branchmap to reuse
2590 # try to find an existing branchmap to reuse
2591 subsettable = getbranchmapsubsettable()
2591 subsettable = getbranchmapsubsettable()
2592 candidatefilter = subsettable.get(None)
2592 candidatefilter = subsettable.get(None)
2593 while candidatefilter is not None:
2593 while candidatefilter is not None:
2594 candidatebm = repo.filtered(candidatefilter).branchmap()
2594 candidatebm = repo.filtered(candidatefilter).branchmap()
2595 if candidatebm.validfor(baserepo):
2595 if candidatebm.validfor(baserepo):
2596 filtered = repoview.filterrevs(repo, candidatefilter)
2596 filtered = repoview.filterrevs(repo, candidatefilter)
2597 missing = [r for r in allbaserevs if r in filtered]
2597 missing = [r for r in allbaserevs if r in filtered]
2598 base = candidatebm.copy()
2598 base = candidatebm.copy()
2599 base.update(baserepo, missing)
2599 base.update(baserepo, missing)
2600 break
2600 break
2601 candidatefilter = subsettable.get(candidatefilter)
2601 candidatefilter = subsettable.get(candidatefilter)
2602 else:
2602 else:
2603 # no suitable subset was found
2603 # no suitable subset was found
2604 base = branchmap.branchcache()
2604 base = branchmap.branchcache()
2605 base.update(baserepo, allbaserevs)
2605 base.update(baserepo, allbaserevs)
2606
2606
2607 def setup():
2607 def setup():
2608 x[0] = base.copy()
2608 x[0] = base.copy()
2609 if clearcaches:
2609 if clearcaches:
2610 unfi._revbranchcache = None
2610 unfi._revbranchcache = None
2611 clearchangelog(repo)
2611 clearchangelog(repo)
2612
2612
2613 def bench():
2613 def bench():
2614 x[0].update(targetrepo, newrevs)
2614 x[0].update(targetrepo, newrevs)
2615
2615
2616 timer(bench, setup=setup)
2616 timer(bench, setup=setup)
2617 fm.end()
2617 fm.end()
2618 finally:
2618 finally:
2619 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2619 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2620 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2620 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2621
2621
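# Self-contained sketch of the cache-reuse walk in perfbranchmapupdate above:
# follow the subset chain, return the first cached value that is still valid
# for the target, and signal the caller to build from scratch when the chain
# is exhausted.  All arguments here are plain dicts/callables, not Mercurial
# objects.
def _example_findreusable(start, chain, caches, isvalid):
    candidate = start
    while candidate is not None:
        cached = caches.get(candidate)
        if cached is not None and isvalid(cached):
            return cached
        candidate = chain.get(candidate)
    return None  # caller falls back to building a fresh cache

# _example_findreusable('b', {'b': 'a'}, {'a': {'heads': []}}, lambda c: True)
# -> {'heads': []}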
2622 @command(b'perfbranchmapload', [
2622 @command(b'perfbranchmapload', [
2623 (b'f', b'filter', b'', b'Specify repoview filter'),
2623 (b'f', b'filter', b'', b'Specify repoview filter'),
2624 (b'', b'list', False, b'List branchmap filter caches'),
2624 (b'', b'list', False, b'List branchmap filter caches'),
2625 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2625 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2626
2626
2627 ] + formatteropts)
2627 ] + formatteropts)
2628 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2628 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2629 """benchmark reading the branchmap"""
2629 """benchmark reading the branchmap"""
2630 opts = _byteskwargs(opts)
2630 opts = _byteskwargs(opts)
2631 clearrevlogs = opts[b'clear_revlogs']
2631 clearrevlogs = opts[b'clear_revlogs']
2632
2632
2633 if list:
2633 if list:
2634 for name, kind, st in repo.cachevfs.readdir(stat=True):
2634 for name, kind, st in repo.cachevfs.readdir(stat=True):
2635 if name.startswith(b'branch2'):
2635 if name.startswith(b'branch2'):
2636 filtername = name.partition(b'-')[2] or b'unfiltered'
2636 filtername = name.partition(b'-')[2] or b'unfiltered'
2637 ui.status(b'%s - %s\n'
2637 ui.status(b'%s - %s\n'
2638 % (filtername, util.bytecount(st.st_size)))
2638 % (filtername, util.bytecount(st.st_size)))
2639 return
2639 return
2640 if not filter:
2640 if not filter:
2641 filter = None
2641 filter = None
2642 subsettable = getbranchmapsubsettable()
2642 subsettable = getbranchmapsubsettable()
2643 if filter is None:
2643 if filter is None:
2644 repo = repo.unfiltered()
2644 repo = repo.unfiltered()
2645 else:
2645 else:
2646 repo = repoview.repoview(repo, filter)
2646 repo = repoview.repoview(repo, filter)
2647
2647
2648 repo.branchmap() # make sure we have a relevant, up to date branchmap
2648 repo.branchmap() # make sure we have a relevant, up to date branchmap
2649
2649
2650 try:
2650 try:
2651 fromfile = branchmap.branchcache.fromfile
2651 fromfile = branchmap.branchcache.fromfile
2652 except AttributeError:
2652 except AttributeError:
2653 # older versions
2653 # older versions
2654 fromfile = branchmap.read
2654 fromfile = branchmap.read
2655
2655
2656 currentfilter = filter
2656 currentfilter = filter
2657 # try once without timer, the filter may not be cached
2657 # try once without timer, the filter may not be cached
2658 while fromfile(repo) is None:
2658 while fromfile(repo) is None:
2659 currentfilter = subsettable.get(currentfilter)
2659 currentfilter = subsettable.get(currentfilter)
2660 if currentfilter is None:
2660 if currentfilter is None:
2661 raise error.Abort(b'No branchmap cached for %s repo'
2661 raise error.Abort(b'No branchmap cached for %s repo'
2662 % (filter or b'unfiltered'))
2662 % (filter or b'unfiltered'))
2663 repo = repo.filtered(currentfilter)
2663 repo = repo.filtered(currentfilter)
2664 timer, fm = gettimer(ui, opts)
2664 timer, fm = gettimer(ui, opts)
2665 def setup():
2665 def setup():
2666 if clearrevlogs:
2666 if clearrevlogs:
2667 clearchangelog(repo)
2667 clearchangelog(repo)
2668 def bench():
2668 def bench():
2669 fromfile(repo)
2669 fromfile(repo)
2670 timer(bench, setup=setup)
2670 timer(bench, setup=setup)
2671 fm.end()
2671 fm.end()
2672
2672
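# Self-contained sketch of the cache-file name handling used by --list in
# perfbranchmapload above: on-disk branchmap files are named ``branch2`` for
# the unfiltered view and ``branch2-<filtername>`` otherwise, so the filter
# name is whatever follows the first dash.
def _example_filtername(cachefilename):
    return cachefilename.partition(b'-')[2] or b'unfiltered'

# _example_filtername(b'branch2')        -> b'unfiltered'
# _example_filtername(b'branch2-served') -> b'served'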
2673 @command(b'perfloadmarkers')
2673 @command(b'perfloadmarkers')
2674 def perfloadmarkers(ui, repo):
2674 def perfloadmarkers(ui, repo):
2675 """benchmark the time to parse the on-disk markers for a repo
2675 """benchmark the time to parse the on-disk markers for a repo
2676
2676
2677 Result is the number of markers in the repo."""
2677 Result is the number of markers in the repo."""
2678 timer, fm = gettimer(ui)
2678 timer, fm = gettimer(ui)
2679 svfs = getsvfs(repo)
2679 svfs = getsvfs(repo)
2680 timer(lambda: len(obsolete.obsstore(svfs)))
2680 timer(lambda: len(obsolete.obsstore(svfs)))
2681 fm.end()
2681 fm.end()
2682
2682
2683 @command(b'perflrucachedict', formatteropts +
2683 @command(b'perflrucachedict', formatteropts +
2684 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2684 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2685 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2685 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2686 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2686 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2687 (b'', b'size', 4, b'size of cache'),
2687 (b'', b'size', 4, b'size of cache'),
2688 (b'', b'gets', 10000, b'number of key lookups'),
2688 (b'', b'gets', 10000, b'number of key lookups'),
2689 (b'', b'sets', 10000, b'number of key sets'),
2689 (b'', b'sets', 10000, b'number of key sets'),
2690 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2690 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2691 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2691 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2692 norepo=True)
2692 norepo=True)
2693 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2693 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2694 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2694 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2695 opts = _byteskwargs(opts)
2695 opts = _byteskwargs(opts)
2696
2696
2697 def doinit():
2697 def doinit():
2698 for i in _xrange(10000):
2698 for i in _xrange(10000):
2699 util.lrucachedict(size)
2699 util.lrucachedict(size)
2700
2700
2701 costrange = list(range(mincost, maxcost + 1))
2701 costrange = list(range(mincost, maxcost + 1))
2702
2702
2703 values = []
2703 values = []
2704 for i in _xrange(size):
2704 for i in _xrange(size):
2705 values.append(random.randint(0, _maxint))
2705 values.append(random.randint(0, _maxint))
2706
2706
2707 # Get mode fills the cache and tests raw lookup performance with no
2707 # Get mode fills the cache and tests raw lookup performance with no
2708 # eviction.
2708 # eviction.
2709 getseq = []
2709 getseq = []
2710 for i in _xrange(gets):
2710 for i in _xrange(gets):
2711 getseq.append(random.choice(values))
2711 getseq.append(random.choice(values))
2712
2712
2713 def dogets():
2713 def dogets():
2714 d = util.lrucachedict(size)
2714 d = util.lrucachedict(size)
2715 for v in values:
2715 for v in values:
2716 d[v] = v
2716 d[v] = v
2717 for key in getseq:
2717 for key in getseq:
2718 value = d[key]
2718 value = d[key]
2719 value # silence pyflakes warning
2719 value # silence pyflakes warning
2720
2720
2721 def dogetscost():
2721 def dogetscost():
2722 d = util.lrucachedict(size, maxcost=costlimit)
2722 d = util.lrucachedict(size, maxcost=costlimit)
2723 for i, v in enumerate(values):
2723 for i, v in enumerate(values):
2724 d.insert(v, v, cost=costs[i])
2724 d.insert(v, v, cost=costs[i])
2725 for key in getseq:
2725 for key in getseq:
2726 try:
2726 try:
2727 value = d[key]
2727 value = d[key]
2728 value # silence pyflakes warning
2728 value # silence pyflakes warning
2729 except KeyError:
2729 except KeyError:
2730 pass
2730 pass
2731
2731
2732 # Set mode tests insertion speed with cache eviction.
2732 # Set mode tests insertion speed with cache eviction.
2733 setseq = []
2733 setseq = []
2734 costs = []
2734 costs = []
2735 for i in _xrange(sets):
2735 for i in _xrange(sets):
2736 setseq.append(random.randint(0, _maxint))
2736 setseq.append(random.randint(0, _maxint))
2737 costs.append(random.choice(costrange))
2737 costs.append(random.choice(costrange))
2738
2738
2739 def doinserts():
2739 def doinserts():
2740 d = util.lrucachedict(size)
2740 d = util.lrucachedict(size)
2741 for v in setseq:
2741 for v in setseq:
2742 d.insert(v, v)
2742 d.insert(v, v)
2743
2743
2744 def doinsertscost():
2744 def doinsertscost():
2745 d = util.lrucachedict(size, maxcost=costlimit)
2745 d = util.lrucachedict(size, maxcost=costlimit)
2746 for i, v in enumerate(setseq):
2746 for i, v in enumerate(setseq):
2747 d.insert(v, v, cost=costs[i])
2747 d.insert(v, v, cost=costs[i])
2748
2748
2749 def dosets():
2749 def dosets():
2750 d = util.lrucachedict(size)
2750 d = util.lrucachedict(size)
2751 for v in setseq:
2751 for v in setseq:
2752 d[v] = v
2752 d[v] = v
2753
2753
2754 # Mixed mode randomly performs gets and sets with eviction.
2754 # Mixed mode randomly performs gets and sets with eviction.
2755 mixedops = []
2755 mixedops = []
2756 for i in _xrange(mixed):
2756 for i in _xrange(mixed):
2757 r = random.randint(0, 100)
2757 r = random.randint(0, 100)
2758 if r < mixedgetfreq:
2758 if r < mixedgetfreq:
2759 op = 0
2759 op = 0
2760 else:
2760 else:
2761 op = 1
2761 op = 1
2762
2762
2763 mixedops.append((op,
2763 mixedops.append((op,
2764 random.randint(0, size * 2),
2764 random.randint(0, size * 2),
2765 random.choice(costrange)))
2765 random.choice(costrange)))
2766
2766
2767 def domixed():
2767 def domixed():
2768 d = util.lrucachedict(size)
2768 d = util.lrucachedict(size)
2769
2769
2770 for op, v, cost in mixedops:
2770 for op, v, cost in mixedops:
2771 if op == 0:
2771 if op == 0:
2772 try:
2772 try:
2773 d[v]
2773 d[v]
2774 except KeyError:
2774 except KeyError:
2775 pass
2775 pass
2776 else:
2776 else:
2777 d[v] = v
2777 d[v] = v
2778
2778
2779 def domixedcost():
2779 def domixedcost():
2780 d = util.lrucachedict(size, maxcost=costlimit)
2780 d = util.lrucachedict(size, maxcost=costlimit)
2781
2781
2782 for op, v, cost in mixedops:
2782 for op, v, cost in mixedops:
2783 if op == 0:
2783 if op == 0:
2784 try:
2784 try:
2785 d[v]
2785 d[v]
2786 except KeyError:
2786 except KeyError:
2787 pass
2787 pass
2788 else:
2788 else:
2789 d.insert(v, v, cost=cost)
2789 d.insert(v, v, cost=cost)
2790
2790
2791 benches = [
2791 benches = [
2792 (doinit, b'init'),
2792 (doinit, b'init'),
2793 ]
2793 ]
2794
2794
2795 if costlimit:
2795 if costlimit:
2796 benches.extend([
2796 benches.extend([
2797 (dogetscost, b'gets w/ cost limit'),
2797 (dogetscost, b'gets w/ cost limit'),
2798 (doinsertscost, b'inserts w/ cost limit'),
2798 (doinsertscost, b'inserts w/ cost limit'),
2799 (domixedcost, b'mixed w/ cost limit'),
2799 (domixedcost, b'mixed w/ cost limit'),
2800 ])
2800 ])
2801 else:
2801 else:
2802 benches.extend([
2802 benches.extend([
2803 (dogets, b'gets'),
2803 (dogets, b'gets'),
2804 (doinserts, b'inserts'),
2804 (doinserts, b'inserts'),
2805 (dosets, b'sets'),
2805 (dosets, b'sets'),
2806 (domixed, b'mixed')
2806 (domixed, b'mixed')
2807 ])
2807 ])
2808
2808
2809 for fn, title in benches:
2809 for fn, title in benches:
2810 timer, fm = gettimer(ui, opts)
2810 timer, fm = gettimer(ui, opts)
2811 timer(fn, title=title)
2811 timer(fn, title=title)
2812 fm.end()
2812 fm.end()
2813
2813
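The `perflrucachedict` benchmark above exercises the whole `util.lrucachedict` surface that perf.py relies on: plain `d[k] = v` / `d[k]` access (with `KeyError` on a miss), `insert(k, v, cost=...)`, and the `maxcost` constructor argument selected by `--costlimit`. The following is a minimal standalone sketch of that same API, built only from the calls visible above; it is illustrative and not part of perf.py.

from mercurial import util

# Plain LRU dictionary, as in dogets()/doinserts() above.
d = util.lrucachedict(4)
d[b'a'] = 1                    # insertion; least-recently-used entries are evicted on overflow
try:
    value = d[b'missing']
except KeyError:               # lookups of evicted or unknown keys raise KeyError
    pass

# Cost-aware variant, as in dogetscost()/doinsertscost() above.
dc = util.lrucachedict(4, maxcost=100)
dc.insert(b'b', 2, cost=30)    # total cost of cached items is capped at maxcost

Passing `--costlimit` to `hg perflrucachedict` swaps the plain gets/inserts/sets/mixed timers for their cost-limited variants, as the `benches` selection above shows.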
2814 @command(b'perfwrite', formatteropts)
2815 def perfwrite(ui, repo, **opts):
2816 """microbenchmark ui.write
2817 """
2818 opts = _byteskwargs(opts)
2819
2820 timer, fm = gettimer(ui, opts)
2821 def write():
2822 for i in range(100000):
2823 ui.write((b'Testing write performance\n'))
2824 timer(write)
2825 fm.end()
2826
2827 def uisetup(ui):
2828 if (util.safehasattr(cmdutil, b'openrevlog') and
2829 not util.safehasattr(commands, b'debugrevlogopts')):
2830 # for "historical portability":
2831 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2832 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2833 # openrevlog() should cause failure, because it has been
2834 # available since 3.5 (or 49c583ca48c4).
2835 def openrevlog(orig, repo, cmd, file_, opts):
2836 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2837 raise error.Abort(b"This version doesn't support --dir option",
2838 hint=b"use 3.5 or later")
2839 return orig(repo, cmd, file_, opts)
2840 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2841
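The `uisetup()` hook above uses the standard compatibility idiom: probe for a capability with `util.safehasattr()` and interpose on the legacy API with `extensions.wrapfunction()`. A stripped-down sketch of that idiom follows; the function names and the simplified guard are placeholders, not additional perf.py code.

from mercurial import cmdutil, extensions, util

def _wrapper(orig, *args, **kwargs):
    # wrapfunction() passes the original callable as the first argument;
    # the wrapper can reject unsupported options or simply delegate.
    return orig(*args, **kwargs)

def uisetup_sketch(ui):
    # only wrap when the old attribute actually exists on this Mercurial
    if util.safehasattr(cmdutil, b'openrevlog'):
        extensions.wrapfunction(cmdutil, b'openrevlog', _wrapper)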
2842 @command(b'perfprogress', formatteropts + [
2843 (b'', b'topic', b'topic', b'topic for progress messages'),
2844 (b'c', b'total', 1000000, b'total value we are progressing to'),
2845 ], norepo=True)
2846 def perfprogress(ui, topic=None, total=None, **opts):
2847 """printing of progress bars"""
2848 opts = _byteskwargs(opts)
2849
2850 timer, fm = gettimer(ui, opts)
2851
2852 def doprogress():
2853 with ui.makeprogress(topic, total=total) as progress:
2854 for i in pycompat.xrange(total):
2855 progress.increment()
2856
2857 timer(doprogress)
2858 fm.end()
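This closes out the perf.py hunk. Every command in it follows the same harness shape: normalize keyword arguments with `_byteskwargs()`, obtain `timer`/`fm` from `gettimer()`, hand the workload (plus an optional `setup` callback) to `timer()`, and finish with `fm.end()`. Below is a hypothetical skeleton of that shape; it assumes perf.py's own helpers (`@command`, `formatteropts`, `gettimer`, `_byteskwargs`), and `perfexample`/`workload` are made-up names.

@command(b'perfexample', formatteropts)
def perfexample(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def setup():
        pass                      # optional per-run preparation, e.g. clearing caches

    def workload():
        len(repo.changelog)       # the operation being measured

    timer(workload, setup=setup)  # repeats until a perf.run-limits condition is met
    fm.end()                      # flush formatter output (plain or --template json)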
@@ -1,355 +1,356
1 #require test-repo
2
3 Set vars:
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
8 Prepare repo:
9
10 $ hg init
11
12 $ echo this is file a > a
13 $ hg add a
14 $ hg commit -m first
15
16 $ echo adding to file a >> a
17 $ hg commit -m second
18
19 $ echo adding more to file a >> a
20 $ hg commit -m third
21
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
26 created new head
27
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
31 perfstatus
32
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
37 > presleep=0
38 > stub=on
39 > parentscount=1
40 > EOF
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
43
44 Configurations
45 ==============
46
47 "perf"
48 ------
49
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
53 (default: off).
54
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
57
58 "run-limits"
59 Control the number of runs each benchmark will perform. The option value
60 should be a list of '<time>-<numberofrun>' pairs. After each run the
61 conditions are considered in order with the following logic:
62
63 If benchmark has been running for <time> seconds, and we have performed
64 <numberofrun> iterations, stop the benchmark,
65
66 The default value is: '3.0-100, 10.0-3'
67
68 "stub"
69 When set, benchmarks will only be run once, useful for testing (default:
70 off)
71
72 list of commands:
73
74 perfaddremove
75 (no help text available)
76 perfancestors
77 (no help text available)
78 perfancestorset
79 (no help text available)
80 perfannotate (no help text available)
81 perfbdiff benchmark a bdiff between revisions
82 perfbookmarks
83 benchmark parsing bookmarks from disk to memory
84 perfbranchmap
85 benchmark the update of a branchmap
86 perfbranchmapload
87 benchmark reading the branchmap
88 perfbranchmapupdate
89 benchmark branchmap update from for <base> revs to <target>
90 revs
91 perfbundleread
92 Benchmark reading of bundle files.
93 perfcca (no help text available)
94 perfchangegroupchangelog
95 Benchmark producing a changelog group for a changegroup.
96 perfchangeset
97 (no help text available)
98 perfctxfiles (no help text available)
99 perfdiffwd Profile diff of working directory changes
100 perfdirfoldmap
101 (no help text available)
102 perfdirs (no help text available)
103 perfdirstate (no help text available)
104 perfdirstatedirs
105 (no help text available)
106 perfdirstatefoldmap
107 (no help text available)
108 perfdirstatewrite
109 (no help text available)
110 perfdiscovery
111 benchmark discovery between local repo and the peer at given
112 path
113 perffncacheencode
114 (no help text available)
115 perffncacheload
116 (no help text available)
117 perffncachewrite
118 (no help text available)
119 perfheads benchmark the computation of a changelog heads
120 perfhelper-pathcopies
121 find statistic about potential parameters for the
122 'perftracecopies'
123 perfignore benchmark operation related to computing ignore
124 perfindex benchmark index creation time followed by a lookup
125 perflinelogedits
126 (no help text available)
127 perfloadmarkers
128 benchmark the time to parse the on-disk markers for a repo
129 perflog (no help text available)
130 perflookup (no help text available)
131 perflrucachedict
132 (no help text available)
133 perfmanifest benchmark the time to read a manifest from disk and return a
134 usable
135 perfmergecalculate
136 (no help text available)
137 perfmoonwalk benchmark walking the changelog backwards
138 perfnodelookup
139 (no help text available)
140 perfnodemap benchmark the time necessary to look up revision from a cold
141 nodemap
142 perfparents benchmark the time necessary to fetch one changeset's parents.
143 perfpathcopies
144 benchmark the copy tracing logic
145 perfphases benchmark phasesets computation
146 perfphasesremote
147 benchmark time needed to analyse phases of the remote server
148 perfprogress printing of progress bars
149 perfrawfiles (no help text available)
150 perfrevlogchunks
151 Benchmark operations on revlog chunks.
152 perfrevlogindex
153 Benchmark operations against a revlog index.
154 perfrevlogrevision
155 Benchmark obtaining a revlog revision.
156 perfrevlogrevisions
157 Benchmark reading a series of revisions from a revlog.
158 perfrevlogwrite
159 Benchmark writing a series of revisions to a revlog.
160 perfrevrange (no help text available)
161 perfrevset benchmark the execution time of a revset
162 perfstartup (no help text available)
163 perfstatus (no help text available)
164 perftags (no help text available)
165 perftemplating
166 test the rendering time of a given template
167 perfunidiff benchmark a unified diff between revisions
168 perfvolatilesets
169 benchmark the computation of various volatile set
170 perfwalk (no help text available)
171 perfwrite microbenchmark ui.write
172
173 (use 'hg help -v perf' to show built-in aliases and global options)
174 $ hg perfaddremove
175 $ hg perfancestors
176 $ hg perfancestorset 2
177 $ hg perfannotate a
178 $ hg perfbdiff -c 1
179 $ hg perfbdiff --alldata 1
180 $ hg perfunidiff -c 1
181 $ hg perfunidiff --alldata 1
182 $ hg perfbookmarks
183 $ hg perfbranchmap
184 $ hg perfbranchmapload
185 $ hg perfbranchmapupdate --base "not tip" --target "tip"
186 benchmark of branchmap with 3 revisions with 1 new ones
187 $ hg perfcca
188 $ hg perfchangegroupchangelog
189 $ hg perfchangegroupchangelog --cgversion 01
190 $ hg perfchangeset 2
191 $ hg perfctxfiles 2
192 $ hg perfdiffwd
193 $ hg perfdirfoldmap
194 $ hg perfdirs
195 $ hg perfdirstate
196 $ hg perfdirstatedirs
197 $ hg perfdirstatefoldmap
198 $ hg perfdirstatewrite
199 #if repofncache
200 $ hg perffncacheencode
201 $ hg perffncacheload
202 $ hg debugrebuildfncache
203 fncache already up to date
204 $ hg perffncachewrite
205 $ hg debugrebuildfncache
206 fncache already up to date
207 #endif
208 $ hg perfheads
209 $ hg perfignore
210 $ hg perfindex
211 $ hg perflinelogedits -n 1
212 $ hg perfloadmarkers
213 $ hg perflog
214 $ hg perflookup 2
215 $ hg perflrucache
216 $ hg perfmanifest 2
217 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
218 $ hg perfmanifest -m 44fe2c8352bb
219 abort: manifest revision must be integer or full node
220 [255]
221 $ hg perfmergecalculate -r 3
222 $ hg perfmoonwalk
223 $ hg perfnodelookup 2
224 $ hg perfpathcopies 1 2
225 $ hg perfprogress --total 1000
226 $ hg perfrawfiles 2
227 $ hg perfrevlogindex -c
228 #if reporevlogstore
229 $ hg perfrevlogrevisions .hg/store/data/a.i
230 #endif
231 $ hg perfrevlogrevision -m 0
232 $ hg perfrevlogchunks -c
233 $ hg perfrevrange
234 $ hg perfrevset 'all()'
235 $ hg perfstartup
236 $ hg perfstatus
237 $ hg perftags
238 $ hg perftemplating
239 $ hg perfvolatilesets
240 $ hg perfwalk
241 $ hg perfparents
242 $ hg perfdiscovery -q .
243
244 Test run control
245 ----------------
246
247 Simple single entry
248
249 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
250 ! wall * comb * user * sys * (best of 15) (glob)
251
252 Multiple entries
253
254 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
255 ! wall * comb * user * sys * (best of 5) (glob)
256
257 error case are ignored
258
259 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
260 malformatted run limit entry, missing "-": 500
261 ! wall * comb * user * sys * (best of 5) (glob)
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
264 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
265 ! wall * comb * user * sys * (best of 5) (glob)
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
267 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
268 ! wall * comb * user * sys * (best of 5) (glob)
269
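The run-limits entries exercised above follow the '<time>-<numberofrun>' format documented in the help text earlier, and malformed entries are skipped with a warning rather than aborting. The sketch below parses that format the way the warnings suggest (split on '-', float for the time, int for the count); it is an illustration of the format only, not the parser perf.py actually ships.

def parse_run_limits(value):
    limits = []
    for entry in value.split(b','):
        entry = entry.strip()
        if b'-' not in entry:
            continue                      # e.g. '500': missing "-"
        time_part, _, count_part = entry.partition(b'-')
        try:
            limits.append((float(time_part), int(count_part)))
        except ValueError:
            continue                      # e.g. 'aaa-12' or '12-aaaaaa'
    return limits

parse_run_limits(b'3.0-100, 10.0-3')      # -> [(3.0, 100), (10.0, 3)]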
270 test actual output
271 ------------------
272
273 normal output:
274
275 $ hg perfheads --config perf.stub=no
276 ! wall * comb * user * sys * (best of *) (glob)
277
278 detailed output:
279
280 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
281 ! wall * comb * user * sys * (best of *) (glob)
282 ! wall * comb * user * sys * (max of *) (glob)
283 ! wall * comb * user * sys * (avg of *) (glob)
284 ! wall * comb * user * sys * (median of *) (glob)
285
286 test json output
287 ----------------
288
289 normal output:
290
291 $ hg perfheads --template json --config perf.stub=no
292 [
293 {
294 "comb": *, (glob)
295 "count": *, (glob)
296 "sys": *, (glob)
297 "user": *, (glob)
298 "wall": * (glob)
299 }
300 ]
301
302 detailed output:
303
304 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
305 [
306 {
307 "avg.comb": *, (glob)
308 "avg.count": *, (glob)
309 "avg.sys": *, (glob)
310 "avg.user": *, (glob)
311 "avg.wall": *, (glob)
312 "comb": *, (glob)
313 "count": *, (glob)
314 "max.comb": *, (glob)
315 "max.count": *, (glob)
316 "max.sys": *, (glob)
317 "max.user": *, (glob)
318 "max.wall": *, (glob)
319 "median.comb": *, (glob)
320 "median.count": *, (glob)
321 "median.sys": *, (glob)
322 "median.user": *, (glob)
323 "median.wall": *, (glob)
324 "sys": *, (glob)
325 "user": *, (glob)
326 "wall": * (glob)
327 }
328 ]
329
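Because --template json emits structured results, the numbers shown above can be consumed programmatically, for example to track wall-time regressions between runs. The snippet below is an illustration and not part of this test file; the invocation and the comparison are assumptions, while the key names come from the output above.

import json, subprocess

out = subprocess.check_output(
    ['hg', 'perfheads', '--template', 'json', '--config', 'perf.stub=no'])
results = json.loads(out)
for entry in results:
    # "wall" is the best wall time; "count" is the number of runs performed
    print('%d runs, best wall time %.6fs' % (entry['count'], entry['wall']))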
330 Check perf.py for historical portability
331 ----------------------------------------
332
333 $ cd "$TESTDIR/.."
334
335 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
336 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
337 > "$TESTDIR"/check-perf-code.py contrib/perf.py
338 contrib/perf.py:\d+: (re)
339 > from mercurial import (
340 import newer module separately in try clause for early Mercurial
341 contrib/perf.py:\d+: (re)
342 > from mercurial import (
343 import newer module separately in try clause for early Mercurial
344 contrib/perf.py:\d+: (re)
345 > origindexpath = orig.opener.join(orig.indexfile)
346 use getvfs()/getsvfs() for early Mercurial
347 contrib/perf.py:\d+: (re)
348 > origdatapath = orig.opener.join(orig.datafile)
349 use getvfs()/getsvfs() for early Mercurial
350 contrib/perf.py:\d+: (re)
351 > vfs = vfsmod.vfs(tmpdir)
352 use getvfs()/getsvfs() for early Mercurial
353 contrib/perf.py:\d+: (re)
354 > vfs.options = getattr(orig.opener, 'options', None)
355 use getvfs()/getsvfs() for early Mercurial
356 [1]