##// END OF EJS Templates
perf: copyedit a few documentation strings...
Augie Fackler -
r42188:4c700c84 default
parent child Browse files
Show More
@@ -1,2857 +1,2858 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistic will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of run (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``run-limits``
18 ``run-limits``
19 Control the number of run each benchmark will perform. The option value
19 Control the number of runs each benchmark will perform. The option value
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
21 condition are considered in order with the following logic:
21 conditions are considered in order with the following logic:
22
22
23 If benchmark have been running for <time> seconds, and we have performed
23 If benchmark has been running for <time> seconds, and we have performed
24 <numberofrun> iterations, stop the benchmark,
24 <numberofrun> iterations, stop the benchmark,
25
25
26 The default value is: `3.0-100, 10.0-3`
26 The default value is: `3.0-100, 10.0-3`
27
27
28 ``stub``
28 ``stub``
29 When set, benchmark will only be run once, useful for testing (default: off)
29 When set, benchmarks will only be run once, useful for testing
30 (default: off)
30 '''
31 '''
31
32
32 # "historical portability" policy of perf.py:
33 # "historical portability" policy of perf.py:
33 #
34 #
34 # We have to do:
35 # We have to do:
35 # - make perf.py "loadable" with as wide Mercurial version as possible
36 # - make perf.py "loadable" with as wide Mercurial version as possible
36 # This doesn't mean that perf commands work correctly with that Mercurial.
37 # This doesn't mean that perf commands work correctly with that Mercurial.
37 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
38 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
38 # - make historical perf command work correctly with as wide Mercurial
39 # - make historical perf command work correctly with as wide Mercurial
39 # version as possible
40 # version as possible
40 #
41 #
41 # We have to do, if possible with reasonable cost:
42 # We have to do, if possible with reasonable cost:
42 # - make recent perf command for historical feature work correctly
43 # - make recent perf command for historical feature work correctly
43 # with early Mercurial
44 # with early Mercurial
44 #
45 #
45 # We don't have to do:
46 # We don't have to do:
46 # - make perf command for recent feature work correctly with early
47 # - make perf command for recent feature work correctly with early
47 # Mercurial
48 # Mercurial
48
49
49 from __future__ import absolute_import
50 from __future__ import absolute_import
50 import contextlib
51 import contextlib
51 import functools
52 import functools
52 import gc
53 import gc
53 import os
54 import os
54 import random
55 import random
55 import shutil
56 import shutil
56 import struct
57 import struct
57 import sys
58 import sys
58 import tempfile
59 import tempfile
59 import threading
60 import threading
60 import time
61 import time
61 from mercurial import (
62 from mercurial import (
62 changegroup,
63 changegroup,
63 cmdutil,
64 cmdutil,
64 commands,
65 commands,
65 copies,
66 copies,
66 error,
67 error,
67 extensions,
68 extensions,
68 hg,
69 hg,
69 mdiff,
70 mdiff,
70 merge,
71 merge,
71 revlog,
72 revlog,
72 util,
73 util,
73 )
74 )
74
75
75 # for "historical portability":
76 # for "historical portability":
76 # try to import modules separately (in dict order), and ignore
77 # try to import modules separately (in dict order), and ignore
77 # failure, because these aren't available with early Mercurial
78 # failure, because these aren't available with early Mercurial
78 try:
79 try:
79 from mercurial import branchmap # since 2.5 (or bcee63733aad)
80 from mercurial import branchmap # since 2.5 (or bcee63733aad)
80 except ImportError:
81 except ImportError:
81 pass
82 pass
82 try:
83 try:
83 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
84 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
84 except ImportError:
85 except ImportError:
85 pass
86 pass
86 try:
87 try:
87 from mercurial import registrar # since 3.7 (or 37d50250b696)
88 from mercurial import registrar # since 3.7 (or 37d50250b696)
88 dir(registrar) # forcibly load it
89 dir(registrar) # forcibly load it
89 except ImportError:
90 except ImportError:
90 registrar = None
91 registrar = None
91 try:
92 try:
92 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
93 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
93 except ImportError:
94 except ImportError:
94 pass
95 pass
95 try:
96 try:
96 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
97 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
97 except ImportError:
98 except ImportError:
98 pass
99 pass
99 try:
100 try:
100 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
101 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
101 except ImportError:
102 except ImportError:
102 pass
103 pass
103
104
104
105
def identity(a):
    """Return *a* unchanged.

    Stand-in for the pycompat helpers (byteskwargs, fsencode, ...) when
    running against a Mercurial too old to provide them.
    """
    return a
107
108
108 try:
109 try:
109 from mercurial import pycompat
110 from mercurial import pycompat
110 getargspec = pycompat.getargspec # added to module after 4.5
111 getargspec = pycompat.getargspec # added to module after 4.5
111 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
112 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
112 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
113 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
113 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
114 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
114 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
115 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
115 if pycompat.ispy3:
116 if pycompat.ispy3:
116 _maxint = sys.maxsize # per py3 docs for replacing maxint
117 _maxint = sys.maxsize # per py3 docs for replacing maxint
117 else:
118 else:
118 _maxint = sys.maxint
119 _maxint = sys.maxint
119 except (ImportError, AttributeError):
120 except (ImportError, AttributeError):
120 import inspect
121 import inspect
121 getargspec = inspect.getargspec
122 getargspec = inspect.getargspec
122 _byteskwargs = identity
123 _byteskwargs = identity
123 fsencode = identity # no py3 support
124 fsencode = identity # no py3 support
124 _maxint = sys.maxint # no py3 support
125 _maxint = sys.maxint # no py3 support
125 _sysstr = lambda x: x # no py3 support
126 _sysstr = lambda x: x # no py3 support
126 _xrange = xrange
127 _xrange = xrange
127
128
128 try:
129 try:
129 # 4.7+
130 # 4.7+
130 queue = pycompat.queue.Queue
131 queue = pycompat.queue.Queue
131 except (AttributeError, ImportError):
132 except (AttributeError, ImportError):
132 # <4.7.
133 # <4.7.
133 try:
134 try:
134 queue = pycompat.queue
135 queue = pycompat.queue
135 except (AttributeError, ImportError):
136 except (AttributeError, ImportError):
136 queue = util.queue
137 queue = util.queue
137
138
138 try:
139 try:
139 from mercurial import logcmdutil
140 from mercurial import logcmdutil
140 makelogtemplater = logcmdutil.maketemplater
141 makelogtemplater = logcmdutil.maketemplater
141 except (AttributeError, ImportError):
142 except (AttributeError, ImportError):
142 try:
143 try:
143 makelogtemplater = cmdutil.makelogtemplater
144 makelogtemplater = cmdutil.makelogtemplater
144 except (AttributeError, ImportError):
145 except (AttributeError, ImportError):
145 makelogtemplater = None
146 makelogtemplater = None
146
147
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
# unique sentinel so that a real attribute value of None is still "present"
_undefined = object()
def safehasattr(thing, attr):
    # True iff 'thing' has attribute 'attr'; 'attr' is bytes, converted
    # to a native str via _sysstr for getattr()
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
# monkey-patch so the rest of this file (and old hg internals) can rely on it
setattr(util, 'safehasattr', safehasattr)
154
155
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # Python 3.3+ (or a backport): high-resolution monotonic clock
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so comparing against b'nt'
    # only matches on Python 2 (where bytes is str).  On Python 3
    # time.perf_counter exists, making this branch unreachable there.
    util.timer = time.clock
else:
    # wall clock fallback for old Pythons on POSIX
    util.timer = time.time
164
165
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
# lookup order: cmdutil (modern) -> commands (older) -> no options at all
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))
172
173
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
# lookup order: cmdutil (modern) -> commands (older) -> local fallback list
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
    ]))

# command table populated by the @command decorator defined below
cmdtable = {}
186
187
187 # for "historical portability":
188 # for "historical portability":
188 # define parsealiases locally, because cmdutil.parsealiases has been
189 # define parsealiases locally, because cmdutil.parsealiases has been
189 # available since 1.5 (or 6252852b4332)
190 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec such as b"perfwalk|walk" into its aliases.

    Local re-implementation of cmdutil.parsealiases, which only exists
    since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
192
193
# Pick the best available implementation of the @command registration
# decorator, newest API first.
if safehasattr(registrar, 'command'):
    # 3.7+ (or 37d50250b696): registrar module provides it
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    # 1.9+ (or 2daa5179e73f): cmdutil provides it
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo= by appending aliases to commands.norepo,
            # which is how pre-3.1 Mercurial tracked no-repo commands
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # cmdtable entries are (func, options[, synopsis]) tuples
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
220
221
# Declare the perf.* config knobs with the config registrar when it is
# available (4.3+); silently skip on older Mercurial, where unregistered
# config options are simply not validated.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # dynamicdefault: actual defaults are supplied at the read site
    # (getint/configbool calls in gettimer and friends)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    # registrar/configitems missing on old Mercurial: nothing to declare
    pass
243
244
def getlen(ui):
    """Return the length function benchmarks should use.

    When the perf.stub testing knob is set, every collection is reported
    as having length 1 so test output stays stable; otherwise the
    builtin len is returned unchanged.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
248
249
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # formatter is falsy, matching plainformatter's behavior
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<mincount>"; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        # NOTE(review): configlist values are bytes on Python 3, where
        # splitting on the str '-' would raise TypeError — confirm this
        # path is exercised under py3
        parts = item.split('-', 1)
        if len(parts) < 2:
            ui.warn(('malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(parts[0])
        except ValueError as e:
            ui.warn(('malformatted run limit entry, %s: %s\n'
                     % (e, item)))
            continue
        try:
            run_limit = int(parts[1])
        except ValueError as e:
            ui.warn(('malformatted run limit entry, %s: %s\n'
                     % (e, item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        # no (valid) user configuration: fall back to built-in limits
        limits = DEFAULTLIMITS

    t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
    return t, fm
341
342
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot replacement for _timer used when perf.stub is set.

    Runs the optional setup callable, then the benchmarked callable,
    exactly once each; nothing is timed or reported (fm and title are
    accepted only for signature compatibility with _timer).
    """
    if setup is not None:
        setup()
    func()
346
347
@contextlib.contextmanager
def timeone():
    """Time the with-block, yielding a one-element result list.

    After the block exits, r[0] is a (wallclock, user-cpu, system-cpu)
    tuple for the enclosed code.
    """
    r = []
    # sample CPU times (os.times) and wallclock (util.timer) around the block;
    # ordering keeps the wallclock window inside the CPU-time window
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    # os.times() tuples: [0] is user time, [1] is system time
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
357
358
358
359
# list of stop condition (elapsed time, minimal run count)
# checked in order by _timer: stop after 3s once 100 runs are done,
# otherwise after 10s once 3 runs are done
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
364
365
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS):
    """Repeatedly run func, then report timings through formatter fm.

    setup (if given) runs before each iteration and is not timed.
    limits is a sequence of (seconds, mincount) stop conditions; the
    loop ends once any condition is fully satisfied, so at least one
    iteration always runs.
    """
    # collect garbage up front so earlier allocations don't get charged
    # to the first timed run
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the return value of the final run; shown as "! result: ..."
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
389
390
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's timing summary through formatter fm.

    timings is a list of (wall, user, sys) tuples and is sorted in
    place.  Only the best run is printed unless displayall is true, in
    which case max, average and median rows follow.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" rows get a "<role>." prefix on their field names
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        # column-wise mean over (wall, user, sys)
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
421
422
422 # utilities for historical portability
423 # utilities for historical portability
423
424
def getint(ui, section, name, default):
    """Read config option section.name as an int, or return default.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so fetch the raw value ourselves and convert it, raising
    error.ConfigError for non-integer values.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
435
436
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # remember the current value so restore() can undo any set()
    original = getattr(obj, _sysstr(name))

    class _accessor(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), original)

    return _accessor()
465
466
466 # utilities to examine each internal API changes
467 # utilities to examine each internal API changes
467
468
def getbranchmapsubsettable():
    """Locate the branch-map 'subsettable' across Mercurial versions.

    for "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    """
    for candidate in (branchmap, repoview):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
483
484
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability": repo.svfs has been available since
    2.3 (or 7034365089bf); older repos expose repo.sopener instead.
    """
    # falls back to sopener (raising AttributeError if even that is
    # missing, exactly like a plain getattr would)
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
494
495
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability": repo.vfs has been available since
    2.3 (or 7034365089bf); older repos expose repo.opener instead.
    """
    # falls back to opener (raising AttributeError if even that is
    # missing, exactly like a plain getattr would)
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
505
506
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # NOTE(review): on Python 3 instance __dict__ keys are native
            # str, so the b'_tagscache' membership test/del may never
            # match — confirm against a py3 run
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    # older APIs: clearing means resetting the cached attribute to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
534 raise error.Abort((b"tags API of this hg command is unknown"))
534
535
535 # utilities to clear cache
536 # utilities to clear cache
536
537
537 def clearfilecache(obj, attrname):
538 def clearfilecache(obj, attrname):
538 unfiltered = getattr(obj, 'unfiltered', None)
539 unfiltered = getattr(obj, 'unfiltered', None)
539 if unfiltered is not None:
540 if unfiltered is not None:
540 obj = obj.unfiltered()
541 obj = obj.unfiltered()
541 if attrname in vars(obj):
542 if attrname in vars(obj):
542 delattr(obj, attrname)
543 delattr(obj, attrname)
543 obj._filecache.pop(attrname, None)
544 obj._filecache.pop(attrname, None)
544
545
545 def clearchangelog(repo):
546 def clearchangelog(repo):
546 if repo is not repo.unfiltered():
547 if repo is not repo.unfiltered():
547 object.__setattr__(repo, r'_clcachekey', None)
548 object.__setattr__(repo, r'_clcachekey', None)
548 object.__setattr__(repo, r'_clcache', None)
549 object.__setattr__(repo, r'_clcache', None)
549 clearfilecache(repo.unfiltered(), 'changelog')
550 clearfilecache(repo.unfiltered(), 'changelog')
550
551
551 # perf commands
552 # perf commands
552
553
553 @command(b'perfwalk', formatteropts)
554 @command(b'perfwalk', formatteropts)
554 def perfwalk(ui, repo, *pats, **opts):
555 def perfwalk(ui, repo, *pats, **opts):
555 opts = _byteskwargs(opts)
556 opts = _byteskwargs(opts)
556 timer, fm = gettimer(ui, opts)
557 timer, fm = gettimer(ui, opts)
557 m = scmutil.match(repo[None], pats, {})
558 m = scmutil.match(repo[None], pats, {})
558 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
559 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
559 ignored=False))))
560 ignored=False))))
560 fm.end()
561 fm.end()
561
562
562 @command(b'perfannotate', formatteropts)
563 @command(b'perfannotate', formatteropts)
563 def perfannotate(ui, repo, f, **opts):
564 def perfannotate(ui, repo, f, **opts):
564 opts = _byteskwargs(opts)
565 opts = _byteskwargs(opts)
565 timer, fm = gettimer(ui, opts)
566 timer, fm = gettimer(ui, opts)
566 fc = repo[b'.'][f]
567 fc = repo[b'.'][f]
567 timer(lambda: len(fc.annotate(True)))
568 timer(lambda: len(fc.annotate(True)))
568 fm.end()
569 fm.end()
569
570
570 @command(b'perfstatus',
571 @command(b'perfstatus',
571 [(b'u', b'unknown', False,
572 [(b'u', b'unknown', False,
572 b'ask status to look for unknown files')] + formatteropts)
573 b'ask status to look for unknown files')] + formatteropts)
573 def perfstatus(ui, repo, **opts):
574 def perfstatus(ui, repo, **opts):
574 opts = _byteskwargs(opts)
575 opts = _byteskwargs(opts)
575 #m = match.always(repo.root, repo.getcwd())
576 #m = match.always(repo.root, repo.getcwd())
576 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
577 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
577 # False))))
578 # False))))
578 timer, fm = gettimer(ui, opts)
579 timer, fm = gettimer(ui, opts)
579 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
580 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
580 fm.end()
581 fm.end()
581
582
582 @command(b'perfaddremove', formatteropts)
583 @command(b'perfaddremove', formatteropts)
583 def perfaddremove(ui, repo, **opts):
584 def perfaddremove(ui, repo, **opts):
584 opts = _byteskwargs(opts)
585 opts = _byteskwargs(opts)
585 timer, fm = gettimer(ui, opts)
586 timer, fm = gettimer(ui, opts)
586 try:
587 try:
587 oldquiet = repo.ui.quiet
588 oldquiet = repo.ui.quiet
588 repo.ui.quiet = True
589 repo.ui.quiet = True
589 matcher = scmutil.match(repo[None])
590 matcher = scmutil.match(repo[None])
590 opts[b'dry_run'] = True
591 opts[b'dry_run'] = True
591 if b'uipathfn' in getargspec(scmutil.addremove).args:
592 if b'uipathfn' in getargspec(scmutil.addremove).args:
592 uipathfn = scmutil.getuipathfn(repo)
593 uipathfn = scmutil.getuipathfn(repo)
593 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
594 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
594 else:
595 else:
595 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
596 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
596 finally:
597 finally:
597 repo.ui.quiet = oldquiet
598 repo.ui.quiet = oldquiet
598 fm.end()
599 fm.end()
599
600
600 def clearcaches(cl):
601 def clearcaches(cl):
601 # behave somewhat consistently across internal API changes
602 # behave somewhat consistently across internal API changes
602 if util.safehasattr(cl, b'clearcaches'):
603 if util.safehasattr(cl, b'clearcaches'):
603 cl.clearcaches()
604 cl.clearcaches()
604 elif util.safehasattr(cl, b'_nodecache'):
605 elif util.safehasattr(cl, b'_nodecache'):
605 from mercurial.node import nullid, nullrev
606 from mercurial.node import nullid, nullrev
606 cl._nodecache = {nullid: nullrev}
607 cl._nodecache = {nullid: nullrev}
607 cl._nodepos = None
608 cl._nodepos = None
608
609
609 @command(b'perfheads', formatteropts)
610 @command(b'perfheads', formatteropts)
610 def perfheads(ui, repo, **opts):
611 def perfheads(ui, repo, **opts):
611 """benchmark the computation of a changelog heads"""
612 """benchmark the computation of a changelog heads"""
612 opts = _byteskwargs(opts)
613 opts = _byteskwargs(opts)
613 timer, fm = gettimer(ui, opts)
614 timer, fm = gettimer(ui, opts)
614 cl = repo.changelog
615 cl = repo.changelog
615 def s():
616 def s():
616 clearcaches(cl)
617 clearcaches(cl)
617 def d():
618 def d():
618 len(cl.headrevs())
619 len(cl.headrevs())
619 timer(d, setup=s)
620 timer(d, setup=s)
620 fm.end()
621 fm.end()
621
622
622 @command(b'perftags', formatteropts+
623 @command(b'perftags', formatteropts+
623 [
624 [
624 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
625 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
625 ])
626 ])
626 def perftags(ui, repo, **opts):
627 def perftags(ui, repo, **opts):
627 opts = _byteskwargs(opts)
628 opts = _byteskwargs(opts)
628 timer, fm = gettimer(ui, opts)
629 timer, fm = gettimer(ui, opts)
629 repocleartagscache = repocleartagscachefunc(repo)
630 repocleartagscache = repocleartagscachefunc(repo)
630 clearrevlogs = opts[b'clear_revlogs']
631 clearrevlogs = opts[b'clear_revlogs']
631 def s():
632 def s():
632 if clearrevlogs:
633 if clearrevlogs:
633 clearchangelog(repo)
634 clearchangelog(repo)
634 clearfilecache(repo.unfiltered(), 'manifest')
635 clearfilecache(repo.unfiltered(), 'manifest')
635 repocleartagscache()
636 repocleartagscache()
636 def t():
637 def t():
637 return len(repo.tags())
638 return len(repo.tags())
638 timer(t, setup=s)
639 timer(t, setup=s)
639 fm.end()
640 fm.end()
640
641
641 @command(b'perfancestors', formatteropts)
642 @command(b'perfancestors', formatteropts)
642 def perfancestors(ui, repo, **opts):
643 def perfancestors(ui, repo, **opts):
643 opts = _byteskwargs(opts)
644 opts = _byteskwargs(opts)
644 timer, fm = gettimer(ui, opts)
645 timer, fm = gettimer(ui, opts)
645 heads = repo.changelog.headrevs()
646 heads = repo.changelog.headrevs()
646 def d():
647 def d():
647 for a in repo.changelog.ancestors(heads):
648 for a in repo.changelog.ancestors(heads):
648 pass
649 pass
649 timer(d)
650 timer(d)
650 fm.end()
651 fm.end()
651
652
652 @command(b'perfancestorset', formatteropts)
653 @command(b'perfancestorset', formatteropts)
653 def perfancestorset(ui, repo, revset, **opts):
654 def perfancestorset(ui, repo, revset, **opts):
654 opts = _byteskwargs(opts)
655 opts = _byteskwargs(opts)
655 timer, fm = gettimer(ui, opts)
656 timer, fm = gettimer(ui, opts)
656 revs = repo.revs(revset)
657 revs = repo.revs(revset)
657 heads = repo.changelog.headrevs()
658 heads = repo.changelog.headrevs()
658 def d():
659 def d():
659 s = repo.changelog.ancestors(heads)
660 s = repo.changelog.ancestors(heads)
660 for rev in revs:
661 for rev in revs:
661 rev in s
662 rev in s
662 timer(d)
663 timer(d)
663 fm.end()
664 fm.end()
664
665
665 @command(b'perfdiscovery', formatteropts, b'PATH')
666 @command(b'perfdiscovery', formatteropts, b'PATH')
666 def perfdiscovery(ui, repo, path, **opts):
667 def perfdiscovery(ui, repo, path, **opts):
667 """benchmark discovery between local repo and the peer at given path
668 """benchmark discovery between local repo and the peer at given path
668 """
669 """
669 repos = [repo, None]
670 repos = [repo, None]
670 timer, fm = gettimer(ui, opts)
671 timer, fm = gettimer(ui, opts)
671 path = ui.expandpath(path)
672 path = ui.expandpath(path)
672
673
673 def s():
674 def s():
674 repos[1] = hg.peer(ui, opts, path)
675 repos[1] = hg.peer(ui, opts, path)
675 def d():
676 def d():
676 setdiscovery.findcommonheads(ui, *repos)
677 setdiscovery.findcommonheads(ui, *repos)
677 timer(d, setup=s)
678 timer(d, setup=s)
678 fm.end()
679 fm.end()
679
680
680 @command(b'perfbookmarks', formatteropts +
681 @command(b'perfbookmarks', formatteropts +
681 [
682 [
682 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
683 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
683 ])
684 ])
684 def perfbookmarks(ui, repo, **opts):
685 def perfbookmarks(ui, repo, **opts):
685 """benchmark parsing bookmarks from disk to memory"""
686 """benchmark parsing bookmarks from disk to memory"""
686 opts = _byteskwargs(opts)
687 opts = _byteskwargs(opts)
687 timer, fm = gettimer(ui, opts)
688 timer, fm = gettimer(ui, opts)
688
689
689 clearrevlogs = opts[b'clear_revlogs']
690 clearrevlogs = opts[b'clear_revlogs']
690 def s():
691 def s():
691 if clearrevlogs:
692 if clearrevlogs:
692 clearchangelog(repo)
693 clearchangelog(repo)
693 clearfilecache(repo, b'_bookmarks')
694 clearfilecache(repo, b'_bookmarks')
694 def d():
695 def d():
695 repo._bookmarks
696 repo._bookmarks
696 timer(d, setup=s)
697 timer(d, setup=s)
697 fm.end()
698 fm.end()
698
699
699 @command(b'perfbundleread', formatteropts, b'BUNDLE')
700 @command(b'perfbundleread', formatteropts, b'BUNDLE')
700 def perfbundleread(ui, repo, bundlepath, **opts):
701 def perfbundleread(ui, repo, bundlepath, **opts):
701 """Benchmark reading of bundle files.
702 """Benchmark reading of bundle files.
702
703
703 This command is meant to isolate the I/O part of bundle reading as
704 This command is meant to isolate the I/O part of bundle reading as
704 much as possible.
705 much as possible.
705 """
706 """
706 from mercurial import (
707 from mercurial import (
707 bundle2,
708 bundle2,
708 exchange,
709 exchange,
709 streamclone,
710 streamclone,
710 )
711 )
711
712
712 opts = _byteskwargs(opts)
713 opts = _byteskwargs(opts)
713
714
714 def makebench(fn):
715 def makebench(fn):
715 def run():
716 def run():
716 with open(bundlepath, b'rb') as fh:
717 with open(bundlepath, b'rb') as fh:
717 bundle = exchange.readbundle(ui, fh, bundlepath)
718 bundle = exchange.readbundle(ui, fh, bundlepath)
718 fn(bundle)
719 fn(bundle)
719
720
720 return run
721 return run
721
722
722 def makereadnbytes(size):
723 def makereadnbytes(size):
723 def run():
724 def run():
724 with open(bundlepath, b'rb') as fh:
725 with open(bundlepath, b'rb') as fh:
725 bundle = exchange.readbundle(ui, fh, bundlepath)
726 bundle = exchange.readbundle(ui, fh, bundlepath)
726 while bundle.read(size):
727 while bundle.read(size):
727 pass
728 pass
728
729
729 return run
730 return run
730
731
731 def makestdioread(size):
732 def makestdioread(size):
732 def run():
733 def run():
733 with open(bundlepath, b'rb') as fh:
734 with open(bundlepath, b'rb') as fh:
734 while fh.read(size):
735 while fh.read(size):
735 pass
736 pass
736
737
737 return run
738 return run
738
739
739 # bundle1
740 # bundle1
740
741
741 def deltaiter(bundle):
742 def deltaiter(bundle):
742 for delta in bundle.deltaiter():
743 for delta in bundle.deltaiter():
743 pass
744 pass
744
745
745 def iterchunks(bundle):
746 def iterchunks(bundle):
746 for chunk in bundle.getchunks():
747 for chunk in bundle.getchunks():
747 pass
748 pass
748
749
749 # bundle2
750 # bundle2
750
751
751 def forwardchunks(bundle):
752 def forwardchunks(bundle):
752 for chunk in bundle._forwardchunks():
753 for chunk in bundle._forwardchunks():
753 pass
754 pass
754
755
755 def iterparts(bundle):
756 def iterparts(bundle):
756 for part in bundle.iterparts():
757 for part in bundle.iterparts():
757 pass
758 pass
758
759
759 def iterpartsseekable(bundle):
760 def iterpartsseekable(bundle):
760 for part in bundle.iterparts(seekable=True):
761 for part in bundle.iterparts(seekable=True):
761 pass
762 pass
762
763
763 def seek(bundle):
764 def seek(bundle):
764 for part in bundle.iterparts(seekable=True):
765 for part in bundle.iterparts(seekable=True):
765 part.seek(0, os.SEEK_END)
766 part.seek(0, os.SEEK_END)
766
767
767 def makepartreadnbytes(size):
768 def makepartreadnbytes(size):
768 def run():
769 def run():
769 with open(bundlepath, b'rb') as fh:
770 with open(bundlepath, b'rb') as fh:
770 bundle = exchange.readbundle(ui, fh, bundlepath)
771 bundle = exchange.readbundle(ui, fh, bundlepath)
771 for part in bundle.iterparts():
772 for part in bundle.iterparts():
772 while part.read(size):
773 while part.read(size):
773 pass
774 pass
774
775
775 return run
776 return run
776
777
777 benches = [
778 benches = [
778 (makestdioread(8192), b'read(8k)'),
779 (makestdioread(8192), b'read(8k)'),
779 (makestdioread(16384), b'read(16k)'),
780 (makestdioread(16384), b'read(16k)'),
780 (makestdioread(32768), b'read(32k)'),
781 (makestdioread(32768), b'read(32k)'),
781 (makestdioread(131072), b'read(128k)'),
782 (makestdioread(131072), b'read(128k)'),
782 ]
783 ]
783
784
784 with open(bundlepath, b'rb') as fh:
785 with open(bundlepath, b'rb') as fh:
785 bundle = exchange.readbundle(ui, fh, bundlepath)
786 bundle = exchange.readbundle(ui, fh, bundlepath)
786
787
787 if isinstance(bundle, changegroup.cg1unpacker):
788 if isinstance(bundle, changegroup.cg1unpacker):
788 benches.extend([
789 benches.extend([
789 (makebench(deltaiter), b'cg1 deltaiter()'),
790 (makebench(deltaiter), b'cg1 deltaiter()'),
790 (makebench(iterchunks), b'cg1 getchunks()'),
791 (makebench(iterchunks), b'cg1 getchunks()'),
791 (makereadnbytes(8192), b'cg1 read(8k)'),
792 (makereadnbytes(8192), b'cg1 read(8k)'),
792 (makereadnbytes(16384), b'cg1 read(16k)'),
793 (makereadnbytes(16384), b'cg1 read(16k)'),
793 (makereadnbytes(32768), b'cg1 read(32k)'),
794 (makereadnbytes(32768), b'cg1 read(32k)'),
794 (makereadnbytes(131072), b'cg1 read(128k)'),
795 (makereadnbytes(131072), b'cg1 read(128k)'),
795 ])
796 ])
796 elif isinstance(bundle, bundle2.unbundle20):
797 elif isinstance(bundle, bundle2.unbundle20):
797 benches.extend([
798 benches.extend([
798 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
799 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
799 (makebench(iterparts), b'bundle2 iterparts()'),
800 (makebench(iterparts), b'bundle2 iterparts()'),
800 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
801 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
801 (makebench(seek), b'bundle2 part seek()'),
802 (makebench(seek), b'bundle2 part seek()'),
802 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
803 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
803 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
804 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
804 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
805 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
805 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
806 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
806 ])
807 ])
807 elif isinstance(bundle, streamclone.streamcloneapplier):
808 elif isinstance(bundle, streamclone.streamcloneapplier):
808 raise error.Abort(b'stream clone bundles not supported')
809 raise error.Abort(b'stream clone bundles not supported')
809 else:
810 else:
810 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
811 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
811
812
812 for fn, title in benches:
813 for fn, title in benches:
813 timer, fm = gettimer(ui, opts)
814 timer, fm = gettimer(ui, opts)
814 timer(fn, title=title)
815 timer(fn, title=title)
815 fm.end()
816 fm.end()
816
817
817 @command(b'perfchangegroupchangelog', formatteropts +
818 @command(b'perfchangegroupchangelog', formatteropts +
818 [(b'', b'cgversion', b'02', b'changegroup version'),
819 [(b'', b'cgversion', b'02', b'changegroup version'),
819 (b'r', b'rev', b'', b'revisions to add to changegroup')])
820 (b'r', b'rev', b'', b'revisions to add to changegroup')])
820 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
821 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
821 """Benchmark producing a changelog group for a changegroup.
822 """Benchmark producing a changelog group for a changegroup.
822
823
823 This measures the time spent processing the changelog during a
824 This measures the time spent processing the changelog during a
824 bundle operation. This occurs during `hg bundle` and on a server
825 bundle operation. This occurs during `hg bundle` and on a server
825 processing a `getbundle` wire protocol request (handles clones
826 processing a `getbundle` wire protocol request (handles clones
826 and pull requests).
827 and pull requests).
827
828
828 By default, all revisions are added to the changegroup.
829 By default, all revisions are added to the changegroup.
829 """
830 """
830 opts = _byteskwargs(opts)
831 opts = _byteskwargs(opts)
831 cl = repo.changelog
832 cl = repo.changelog
832 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
833 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
833 bundler = changegroup.getbundler(cgversion, repo)
834 bundler = changegroup.getbundler(cgversion, repo)
834
835
835 def d():
836 def d():
836 state, chunks = bundler._generatechangelog(cl, nodes)
837 state, chunks = bundler._generatechangelog(cl, nodes)
837 for chunk in chunks:
838 for chunk in chunks:
838 pass
839 pass
839
840
840 timer, fm = gettimer(ui, opts)
841 timer, fm = gettimer(ui, opts)
841
842
842 # Terminal printing can interfere with timing. So disable it.
843 # Terminal printing can interfere with timing. So disable it.
843 with ui.configoverride({(b'progress', b'disable'): True}):
844 with ui.configoverride({(b'progress', b'disable'): True}):
844 timer(d)
845 timer(d)
845
846
846 fm.end()
847 fm.end()
847
848
848 @command(b'perfdirs', formatteropts)
849 @command(b'perfdirs', formatteropts)
849 def perfdirs(ui, repo, **opts):
850 def perfdirs(ui, repo, **opts):
850 opts = _byteskwargs(opts)
851 opts = _byteskwargs(opts)
851 timer, fm = gettimer(ui, opts)
852 timer, fm = gettimer(ui, opts)
852 dirstate = repo.dirstate
853 dirstate = repo.dirstate
853 b'a' in dirstate
854 b'a' in dirstate
854 def d():
855 def d():
855 dirstate.hasdir(b'a')
856 dirstate.hasdir(b'a')
856 del dirstate._map._dirs
857 del dirstate._map._dirs
857 timer(d)
858 timer(d)
858 fm.end()
859 fm.end()
859
860
860 @command(b'perfdirstate', formatteropts)
861 @command(b'perfdirstate', formatteropts)
861 def perfdirstate(ui, repo, **opts):
862 def perfdirstate(ui, repo, **opts):
862 opts = _byteskwargs(opts)
863 opts = _byteskwargs(opts)
863 timer, fm = gettimer(ui, opts)
864 timer, fm = gettimer(ui, opts)
864 b"a" in repo.dirstate
865 b"a" in repo.dirstate
865 def d():
866 def d():
866 repo.dirstate.invalidate()
867 repo.dirstate.invalidate()
867 b"a" in repo.dirstate
868 b"a" in repo.dirstate
868 timer(d)
869 timer(d)
869 fm.end()
870 fm.end()
870
871
871 @command(b'perfdirstatedirs', formatteropts)
872 @command(b'perfdirstatedirs', formatteropts)
872 def perfdirstatedirs(ui, repo, **opts):
873 def perfdirstatedirs(ui, repo, **opts):
873 opts = _byteskwargs(opts)
874 opts = _byteskwargs(opts)
874 timer, fm = gettimer(ui, opts)
875 timer, fm = gettimer(ui, opts)
875 b"a" in repo.dirstate
876 b"a" in repo.dirstate
876 def d():
877 def d():
877 repo.dirstate.hasdir(b"a")
878 repo.dirstate.hasdir(b"a")
878 del repo.dirstate._map._dirs
879 del repo.dirstate._map._dirs
879 timer(d)
880 timer(d)
880 fm.end()
881 fm.end()
881
882
882 @command(b'perfdirstatefoldmap', formatteropts)
883 @command(b'perfdirstatefoldmap', formatteropts)
883 def perfdirstatefoldmap(ui, repo, **opts):
884 def perfdirstatefoldmap(ui, repo, **opts):
884 opts = _byteskwargs(opts)
885 opts = _byteskwargs(opts)
885 timer, fm = gettimer(ui, opts)
886 timer, fm = gettimer(ui, opts)
886 dirstate = repo.dirstate
887 dirstate = repo.dirstate
887 b'a' in dirstate
888 b'a' in dirstate
888 def d():
889 def d():
889 dirstate._map.filefoldmap.get(b'a')
890 dirstate._map.filefoldmap.get(b'a')
890 del dirstate._map.filefoldmap
891 del dirstate._map.filefoldmap
891 timer(d)
892 timer(d)
892 fm.end()
893 fm.end()
893
894
894 @command(b'perfdirfoldmap', formatteropts)
895 @command(b'perfdirfoldmap', formatteropts)
895 def perfdirfoldmap(ui, repo, **opts):
896 def perfdirfoldmap(ui, repo, **opts):
896 opts = _byteskwargs(opts)
897 opts = _byteskwargs(opts)
897 timer, fm = gettimer(ui, opts)
898 timer, fm = gettimer(ui, opts)
898 dirstate = repo.dirstate
899 dirstate = repo.dirstate
899 b'a' in dirstate
900 b'a' in dirstate
900 def d():
901 def d():
901 dirstate._map.dirfoldmap.get(b'a')
902 dirstate._map.dirfoldmap.get(b'a')
902 del dirstate._map.dirfoldmap
903 del dirstate._map.dirfoldmap
903 del dirstate._map._dirs
904 del dirstate._map._dirs
904 timer(d)
905 timer(d)
905 fm.end()
906 fm.end()
906
907
907 @command(b'perfdirstatewrite', formatteropts)
908 @command(b'perfdirstatewrite', formatteropts)
908 def perfdirstatewrite(ui, repo, **opts):
909 def perfdirstatewrite(ui, repo, **opts):
909 opts = _byteskwargs(opts)
910 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
911 timer, fm = gettimer(ui, opts)
911 ds = repo.dirstate
912 ds = repo.dirstate
912 b"a" in ds
913 b"a" in ds
913 def d():
914 def d():
914 ds._dirty = True
915 ds._dirty = True
915 ds.write(repo.currenttransaction())
916 ds.write(repo.currenttransaction())
916 timer(d)
917 timer(d)
917 fm.end()
918 fm.end()
918
919
919 @command(b'perfmergecalculate',
920 @command(b'perfmergecalculate',
920 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
921 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
921 def perfmergecalculate(ui, repo, rev, **opts):
922 def perfmergecalculate(ui, repo, rev, **opts):
922 opts = _byteskwargs(opts)
923 opts = _byteskwargs(opts)
923 timer, fm = gettimer(ui, opts)
924 timer, fm = gettimer(ui, opts)
924 wctx = repo[None]
925 wctx = repo[None]
925 rctx = scmutil.revsingle(repo, rev, rev)
926 rctx = scmutil.revsingle(repo, rev, rev)
926 ancestor = wctx.ancestor(rctx)
927 ancestor = wctx.ancestor(rctx)
927 # we don't want working dir files to be stat'd in the benchmark, so prime
928 # we don't want working dir files to be stat'd in the benchmark, so prime
928 # that cache
929 # that cache
929 wctx.dirty()
930 wctx.dirty()
930 def d():
931 def d():
931 # acceptremote is True because we don't want prompts in the middle of
932 # acceptremote is True because we don't want prompts in the middle of
932 # our benchmark
933 # our benchmark
933 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
934 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
934 acceptremote=True, followcopies=True)
935 acceptremote=True, followcopies=True)
935 timer(d)
936 timer(d)
936 fm.end()
937 fm.end()
937
938
938 @command(b'perfpathcopies', [], b"REV REV")
939 @command(b'perfpathcopies', [], b"REV REV")
939 def perfpathcopies(ui, repo, rev1, rev2, **opts):
940 def perfpathcopies(ui, repo, rev1, rev2, **opts):
940 """benchmark the copy tracing logic"""
941 """benchmark the copy tracing logic"""
941 opts = _byteskwargs(opts)
942 opts = _byteskwargs(opts)
942 timer, fm = gettimer(ui, opts)
943 timer, fm = gettimer(ui, opts)
943 ctx1 = scmutil.revsingle(repo, rev1, rev1)
944 ctx1 = scmutil.revsingle(repo, rev1, rev1)
944 ctx2 = scmutil.revsingle(repo, rev2, rev2)
945 ctx2 = scmutil.revsingle(repo, rev2, rev2)
945 def d():
946 def d():
946 copies.pathcopies(ctx1, ctx2)
947 copies.pathcopies(ctx1, ctx2)
947 timer(d)
948 timer(d)
948 fm.end()
949 fm.end()
949
950
950 @command(b'perfphases',
951 @command(b'perfphases',
951 [(b'', b'full', False, b'include file reading time too'),
952 [(b'', b'full', False, b'include file reading time too'),
952 ], b"")
953 ], b"")
953 def perfphases(ui, repo, **opts):
954 def perfphases(ui, repo, **opts):
954 """benchmark phasesets computation"""
955 """benchmark phasesets computation"""
955 opts = _byteskwargs(opts)
956 opts = _byteskwargs(opts)
956 timer, fm = gettimer(ui, opts)
957 timer, fm = gettimer(ui, opts)
957 _phases = repo._phasecache
958 _phases = repo._phasecache
958 full = opts.get(b'full')
959 full = opts.get(b'full')
959 def d():
960 def d():
960 phases = _phases
961 phases = _phases
961 if full:
962 if full:
962 clearfilecache(repo, b'_phasecache')
963 clearfilecache(repo, b'_phasecache')
963 phases = repo._phasecache
964 phases = repo._phasecache
964 phases.invalidate()
965 phases.invalidate()
965 phases.loadphaserevs(repo)
966 phases.loadphaserevs(repo)
966 timer(d)
967 timer(d)
967 fm.end()
968 fm.end()
968
969
969 @command(b'perfphasesremote',
970 @command(b'perfphasesremote',
970 [], b"[DEST]")
971 [], b"[DEST]")
971 def perfphasesremote(ui, repo, dest=None, **opts):
972 def perfphasesremote(ui, repo, dest=None, **opts):
972 """benchmark time needed to analyse phases of the remote server"""
973 """benchmark time needed to analyse phases of the remote server"""
973 from mercurial.node import (
974 from mercurial.node import (
974 bin,
975 bin,
975 )
976 )
976 from mercurial import (
977 from mercurial import (
977 exchange,
978 exchange,
978 hg,
979 hg,
979 phases,
980 phases,
980 )
981 )
981 opts = _byteskwargs(opts)
982 opts = _byteskwargs(opts)
982 timer, fm = gettimer(ui, opts)
983 timer, fm = gettimer(ui, opts)
983
984
984 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
985 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
985 if not path:
986 if not path:
986 raise error.Abort((b'default repository not configured!'),
987 raise error.Abort((b'default repository not configured!'),
987 hint=(b"see 'hg help config.paths'"))
988 hint=(b"see 'hg help config.paths'"))
988 dest = path.pushloc or path.loc
989 dest = path.pushloc or path.loc
989 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
990 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
990 other = hg.peer(repo, opts, dest)
991 other = hg.peer(repo, opts, dest)
991
992
992 # easier to perform discovery through the operation
993 # easier to perform discovery through the operation
993 op = exchange.pushoperation(repo, other)
994 op = exchange.pushoperation(repo, other)
994 exchange._pushdiscoverychangeset(op)
995 exchange._pushdiscoverychangeset(op)
995
996
996 remotesubset = op.fallbackheads
997 remotesubset = op.fallbackheads
997
998
998 with other.commandexecutor() as e:
999 with other.commandexecutor() as e:
999 remotephases = e.callcommand(b'listkeys',
1000 remotephases = e.callcommand(b'listkeys',
1000 {b'namespace': b'phases'}).result()
1001 {b'namespace': b'phases'}).result()
1001 del other
1002 del other
1002 publishing = remotephases.get(b'publishing', False)
1003 publishing = remotephases.get(b'publishing', False)
1003 if publishing:
1004 if publishing:
1004 ui.status((b'publishing: yes\n'))
1005 ui.status((b'publishing: yes\n'))
1005 else:
1006 else:
1006 ui.status((b'publishing: no\n'))
1007 ui.status((b'publishing: no\n'))
1007
1008
1008 nodemap = repo.changelog.nodemap
1009 nodemap = repo.changelog.nodemap
1009 nonpublishroots = 0
1010 nonpublishroots = 0
1010 for nhex, phase in remotephases.iteritems():
1011 for nhex, phase in remotephases.iteritems():
1011 if nhex == b'publishing': # ignore data related to publish option
1012 if nhex == b'publishing': # ignore data related to publish option
1012 continue
1013 continue
1013 node = bin(nhex)
1014 node = bin(nhex)
1014 if node in nodemap and int(phase):
1015 if node in nodemap and int(phase):
1015 nonpublishroots += 1
1016 nonpublishroots += 1
1016 ui.status((b'number of roots: %d\n') % len(remotephases))
1017 ui.status((b'number of roots: %d\n') % len(remotephases))
1017 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1018 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1018 def d():
1019 def d():
1019 phases.remotephasessummary(repo,
1020 phases.remotephasessummary(repo,
1020 remotesubset,
1021 remotesubset,
1021 remotephases)
1022 remotephases)
1022 timer(d)
1023 timer(d)
1023 fm.end()
1024 fm.end()
1024
1025
1025 @command(b'perfmanifest',[
1026 @command(b'perfmanifest',[
1026 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1027 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1027 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1028 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1028 ] + formatteropts, b'REV|NODE')
1029 ] + formatteropts, b'REV|NODE')
1029 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1030 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1030 """benchmark the time to read a manifest from disk and return a usable
1031 """benchmark the time to read a manifest from disk and return a usable
1031 dict-like object
1032 dict-like object
1032
1033
1033 Manifest caches are cleared before retrieval."""
1034 Manifest caches are cleared before retrieval."""
1034 opts = _byteskwargs(opts)
1035 opts = _byteskwargs(opts)
1035 timer, fm = gettimer(ui, opts)
1036 timer, fm = gettimer(ui, opts)
1036 if not manifest_rev:
1037 if not manifest_rev:
1037 ctx = scmutil.revsingle(repo, rev, rev)
1038 ctx = scmutil.revsingle(repo, rev, rev)
1038 t = ctx.manifestnode()
1039 t = ctx.manifestnode()
1039 else:
1040 else:
1040 from mercurial.node import bin
1041 from mercurial.node import bin
1041
1042
1042 if len(rev) == 40:
1043 if len(rev) == 40:
1043 t = bin(rev)
1044 t = bin(rev)
1044 else:
1045 else:
1045 try:
1046 try:
1046 rev = int(rev)
1047 rev = int(rev)
1047
1048
1048 if util.safehasattr(repo.manifestlog, b'getstorage'):
1049 if util.safehasattr(repo.manifestlog, b'getstorage'):
1049 t = repo.manifestlog.getstorage(b'').node(rev)
1050 t = repo.manifestlog.getstorage(b'').node(rev)
1050 else:
1051 else:
1051 t = repo.manifestlog._revlog.lookup(rev)
1052 t = repo.manifestlog._revlog.lookup(rev)
1052 except ValueError:
1053 except ValueError:
1053 raise error.Abort(b'manifest revision must be integer or full '
1054 raise error.Abort(b'manifest revision must be integer or full '
1054 b'node')
1055 b'node')
1055 def d():
1056 def d():
1056 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1057 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1057 repo.manifestlog[t].read()
1058 repo.manifestlog[t].read()
1058 timer(d)
1059 timer(d)
1059 fm.end()
1060 fm.end()
1060
1061
1061 @command(b'perfchangeset', formatteropts)
1062 @command(b'perfchangeset', formatteropts)
1062 def perfchangeset(ui, repo, rev, **opts):
1063 def perfchangeset(ui, repo, rev, **opts):
1063 opts = _byteskwargs(opts)
1064 opts = _byteskwargs(opts)
1064 timer, fm = gettimer(ui, opts)
1065 timer, fm = gettimer(ui, opts)
1065 n = scmutil.revsingle(repo, rev).node()
1066 n = scmutil.revsingle(repo, rev).node()
1066 def d():
1067 def d():
1067 repo.changelog.read(n)
1068 repo.changelog.read(n)
1068 #repo.changelog._cache = None
1069 #repo.changelog._cache = None
1069 timer(d)
1070 timer(d)
1070 fm.end()
1071 fm.end()
1071
1072
1072 @command(b'perfignore', formatteropts)
1073 @command(b'perfignore', formatteropts)
1073 def perfignore(ui, repo, **opts):
1074 def perfignore(ui, repo, **opts):
1074 """benchmark operation related to computing ignore"""
1075 """benchmark operation related to computing ignore"""
1075 opts = _byteskwargs(opts)
1076 opts = _byteskwargs(opts)
1076 timer, fm = gettimer(ui, opts)
1077 timer, fm = gettimer(ui, opts)
1077 dirstate = repo.dirstate
1078 dirstate = repo.dirstate
1078
1079
1079 def setupone():
1080 def setupone():
1080 dirstate.invalidate()
1081 dirstate.invalidate()
1081 clearfilecache(dirstate, b'_ignore')
1082 clearfilecache(dirstate, b'_ignore')
1082
1083
1083 def runone():
1084 def runone():
1084 dirstate._ignore
1085 dirstate._ignore
1085
1086
1086 timer(runone, setup=setupone, title=b"load")
1087 timer(runone, setup=setupone, title=b"load")
1087 fm.end()
1088 fm.end()
1088
1089
1089 @command(b'perfindex', [
1090 @command(b'perfindex', [
1090 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1091 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1091 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1092 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1092 ] + formatteropts)
1093 ] + formatteropts)
1093 def perfindex(ui, repo, **opts):
1094 def perfindex(ui, repo, **opts):
1094 """benchmark index creation time followed by a lookup
1095 """benchmark index creation time followed by a lookup
1095
1096
1096 The default is to look `tip` up. Depending on the index implementation,
1097 The default is to look `tip` up. Depending on the index implementation,
1097 the revision looked up can matters. For example, an implementation
1098 the revision looked up can matters. For example, an implementation
1098 scanning the index will have a faster lookup time for `--rev tip` than for
1099 scanning the index will have a faster lookup time for `--rev tip` than for
1099 `--rev 0`. The number of looked up revisions and their order can also
1100 `--rev 0`. The number of looked up revisions and their order can also
1100 matters.
1101 matters.
1101
1102
1102 Example of useful set to test:
1103 Example of useful set to test:
1103 * tip
1104 * tip
1104 * 0
1105 * 0
1105 * -10:
1106 * -10:
1106 * :10
1107 * :10
1107 * -10: + :10
1108 * -10: + :10
1108 * :10: + -10:
1109 * :10: + -10:
1109 * -10000:
1110 * -10000:
1110 * -10000: + 0
1111 * -10000: + 0
1111
1112
1112 It is not currently possible to check for lookup of a missing node. For
1113 It is not currently possible to check for lookup of a missing node. For
1113 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1114 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1114 import mercurial.revlog
1115 import mercurial.revlog
1115 opts = _byteskwargs(opts)
1116 opts = _byteskwargs(opts)
1116 timer, fm = gettimer(ui, opts)
1117 timer, fm = gettimer(ui, opts)
1117 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1118 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1118 if opts[b'no_lookup']:
1119 if opts[b'no_lookup']:
1119 if opts['rev']:
1120 if opts['rev']:
1120 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1121 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1121 nodes = []
1122 nodes = []
1122 elif not opts[b'rev']:
1123 elif not opts[b'rev']:
1123 nodes = [repo[b"tip"].node()]
1124 nodes = [repo[b"tip"].node()]
1124 else:
1125 else:
1125 revs = scmutil.revrange(repo, opts[b'rev'])
1126 revs = scmutil.revrange(repo, opts[b'rev'])
1126 cl = repo.changelog
1127 cl = repo.changelog
1127 nodes = [cl.node(r) for r in revs]
1128 nodes = [cl.node(r) for r in revs]
1128
1129
1129 unfi = repo.unfiltered()
1130 unfi = repo.unfiltered()
1130 # find the filecache func directly
1131 # find the filecache func directly
1131 # This avoid polluting the benchmark with the filecache logic
1132 # This avoid polluting the benchmark with the filecache logic
1132 makecl = unfi.__class__.changelog.func
1133 makecl = unfi.__class__.changelog.func
1133 def setup():
1134 def setup():
1134 # probably not necessary, but for good measure
1135 # probably not necessary, but for good measure
1135 clearchangelog(unfi)
1136 clearchangelog(unfi)
1136 def d():
1137 def d():
1137 cl = makecl(unfi)
1138 cl = makecl(unfi)
1138 for n in nodes:
1139 for n in nodes:
1139 cl.rev(n)
1140 cl.rev(n)
1140 timer(d, setup=setup)
1141 timer(d, setup=setup)
1141 fm.end()
1142 fm.end()
1142
1143
1143 @command(b'perfnodemap', [
1144 @command(b'perfnodemap', [
1144 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1145 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1145 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1146 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1146 ] + formatteropts)
1147 ] + formatteropts)
1147 def perfnodemap(ui, repo, **opts):
1148 def perfnodemap(ui, repo, **opts):
1148 """benchmark the time necessary to look up revision from a cold nodemap
1149 """benchmark the time necessary to look up revision from a cold nodemap
1149
1150
1150 Depending on the implementation, the amount and order of revision we look
1151 Depending on the implementation, the amount and order of revision we look
1151 up can varies. Example of useful set to test:
1152 up can varies. Example of useful set to test:
1152 * tip
1153 * tip
1153 * 0
1154 * 0
1154 * -10:
1155 * -10:
1155 * :10
1156 * :10
1156 * -10: + :10
1157 * -10: + :10
1157 * :10: + -10:
1158 * :10: + -10:
1158 * -10000:
1159 * -10000:
1159 * -10000: + 0
1160 * -10000: + 0
1160
1161
1161 The command currently focus on valid binary lookup. Benchmarking for
1162 The command currently focus on valid binary lookup. Benchmarking for
1162 hexlookup, prefix lookup and missing lookup would also be valuable.
1163 hexlookup, prefix lookup and missing lookup would also be valuable.
1163 """
1164 """
1164 import mercurial.revlog
1165 import mercurial.revlog
1165 opts = _byteskwargs(opts)
1166 opts = _byteskwargs(opts)
1166 timer, fm = gettimer(ui, opts)
1167 timer, fm = gettimer(ui, opts)
1167 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1168 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1168
1169
1169 unfi = repo.unfiltered()
1170 unfi = repo.unfiltered()
1170 clearcaches = opts['clear_caches']
1171 clearcaches = opts['clear_caches']
1171 # find the filecache func directly
1172 # find the filecache func directly
1172 # This avoid polluting the benchmark with the filecache logic
1173 # This avoid polluting the benchmark with the filecache logic
1173 makecl = unfi.__class__.changelog.func
1174 makecl = unfi.__class__.changelog.func
1174 if not opts[b'rev']:
1175 if not opts[b'rev']:
1175 raise error.Abort('use --rev to specify revisions to look up')
1176 raise error.Abort('use --rev to specify revisions to look up')
1176 revs = scmutil.revrange(repo, opts[b'rev'])
1177 revs = scmutil.revrange(repo, opts[b'rev'])
1177 cl = repo.changelog
1178 cl = repo.changelog
1178 nodes = [cl.node(r) for r in revs]
1179 nodes = [cl.node(r) for r in revs]
1179
1180
1180 # use a list to pass reference to a nodemap from one closure to the next
1181 # use a list to pass reference to a nodemap from one closure to the next
1181 nodeget = [None]
1182 nodeget = [None]
1182 def setnodeget():
1183 def setnodeget():
1183 # probably not necessary, but for good measure
1184 # probably not necessary, but for good measure
1184 clearchangelog(unfi)
1185 clearchangelog(unfi)
1185 nodeget[0] = makecl(unfi).nodemap.get
1186 nodeget[0] = makecl(unfi).nodemap.get
1186
1187
1187 def d():
1188 def d():
1188 get = nodeget[0]
1189 get = nodeget[0]
1189 for n in nodes:
1190 for n in nodes:
1190 get(n)
1191 get(n)
1191
1192
1192 setup = None
1193 setup = None
1193 if clearcaches:
1194 if clearcaches:
1194 def setup():
1195 def setup():
1195 setnodeget()
1196 setnodeget()
1196 else:
1197 else:
1197 setnodeget()
1198 setnodeget()
1198 d() # prewarm the data structure
1199 d() # prewarm the data structure
1199 timer(d, setup=setup)
1200 timer(d, setup=setup)
1200 fm.end()
1201 fm.end()
1201
1202
1202 @command(b'perfstartup', formatteropts)
1203 @command(b'perfstartup', formatteropts)
1203 def perfstartup(ui, repo, **opts):
1204 def perfstartup(ui, repo, **opts):
1204 opts = _byteskwargs(opts)
1205 opts = _byteskwargs(opts)
1205 timer, fm = gettimer(ui, opts)
1206 timer, fm = gettimer(ui, opts)
1206 def d():
1207 def d():
1207 if os.name != r'nt':
1208 if os.name != r'nt':
1208 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1209 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1209 fsencode(sys.argv[0]))
1210 fsencode(sys.argv[0]))
1210 else:
1211 else:
1211 os.environ[r'HGRCPATH'] = r' '
1212 os.environ[r'HGRCPATH'] = r' '
1212 os.system(r"%s version -q > NUL" % sys.argv[0])
1213 os.system(r"%s version -q > NUL" % sys.argv[0])
1213 timer(d)
1214 timer(d)
1214 fm.end()
1215 fm.end()
1215
1216
1216 @command(b'perfparents', formatteropts)
1217 @command(b'perfparents', formatteropts)
1217 def perfparents(ui, repo, **opts):
1218 def perfparents(ui, repo, **opts):
1218 """benchmark the time necessary to fetch one changeset's parents.
1219 """benchmark the time necessary to fetch one changeset's parents.
1219
1220
1220 The fetch is done using the `node identifier`, traversing all object layer
1221 The fetch is done using the `node identifier`, traversing all object layers
1221 from the repository object. The N first revision will be used for this
1222 from the repository object. The first N revisions will be used for this
1222 benchmark. N is controlled by the ``perf.parentscount`` config option
1223 benchmark. N is controlled by the ``perf.parentscount`` config option
1223 (default: 1000).
1224 (default: 1000).
1224 """
1225 """
1225 opts = _byteskwargs(opts)
1226 opts = _byteskwargs(opts)
1226 timer, fm = gettimer(ui, opts)
1227 timer, fm = gettimer(ui, opts)
1227 # control the number of commits perfparents iterates over
1228 # control the number of commits perfparents iterates over
1228 # experimental config: perf.parentscount
1229 # experimental config: perf.parentscount
1229 count = getint(ui, b"perf", b"parentscount", 1000)
1230 count = getint(ui, b"perf", b"parentscount", 1000)
1230 if len(repo.changelog) < count:
1231 if len(repo.changelog) < count:
1231 raise error.Abort(b"repo needs %d commits for this test" % count)
1232 raise error.Abort(b"repo needs %d commits for this test" % count)
1232 repo = repo.unfiltered()
1233 repo = repo.unfiltered()
1233 nl = [repo.changelog.node(i) for i in _xrange(count)]
1234 nl = [repo.changelog.node(i) for i in _xrange(count)]
1234 def d():
1235 def d():
1235 for n in nl:
1236 for n in nl:
1236 repo.changelog.parents(n)
1237 repo.changelog.parents(n)
1237 timer(d)
1238 timer(d)
1238 fm.end()
1239 fm.end()
1239
1240
1240 @command(b'perfctxfiles', formatteropts)
1241 @command(b'perfctxfiles', formatteropts)
1241 def perfctxfiles(ui, repo, x, **opts):
1242 def perfctxfiles(ui, repo, x, **opts):
1242 opts = _byteskwargs(opts)
1243 opts = _byteskwargs(opts)
1243 x = int(x)
1244 x = int(x)
1244 timer, fm = gettimer(ui, opts)
1245 timer, fm = gettimer(ui, opts)
1245 def d():
1246 def d():
1246 len(repo[x].files())
1247 len(repo[x].files())
1247 timer(d)
1248 timer(d)
1248 fm.end()
1249 fm.end()
1249
1250
1250 @command(b'perfrawfiles', formatteropts)
1251 @command(b'perfrawfiles', formatteropts)
1251 def perfrawfiles(ui, repo, x, **opts):
1252 def perfrawfiles(ui, repo, x, **opts):
1252 opts = _byteskwargs(opts)
1253 opts = _byteskwargs(opts)
1253 x = int(x)
1254 x = int(x)
1254 timer, fm = gettimer(ui, opts)
1255 timer, fm = gettimer(ui, opts)
1255 cl = repo.changelog
1256 cl = repo.changelog
1256 def d():
1257 def d():
1257 len(cl.read(x)[3])
1258 len(cl.read(x)[3])
1258 timer(d)
1259 timer(d)
1259 fm.end()
1260 fm.end()
1260
1261
1261 @command(b'perflookup', formatteropts)
1262 @command(b'perflookup', formatteropts)
1262 def perflookup(ui, repo, rev, **opts):
1263 def perflookup(ui, repo, rev, **opts):
1263 opts = _byteskwargs(opts)
1264 opts = _byteskwargs(opts)
1264 timer, fm = gettimer(ui, opts)
1265 timer, fm = gettimer(ui, opts)
1265 timer(lambda: len(repo.lookup(rev)))
1266 timer(lambda: len(repo.lookup(rev)))
1266 fm.end()
1267 fm.end()
1267
1268
1268 @command(b'perflinelogedits',
1269 @command(b'perflinelogedits',
1269 [(b'n', b'edits', 10000, b'number of edits'),
1270 [(b'n', b'edits', 10000, b'number of edits'),
1270 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1271 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1271 ], norepo=True)
1272 ], norepo=True)
1272 def perflinelogedits(ui, **opts):
1273 def perflinelogedits(ui, **opts):
1273 from mercurial import linelog
1274 from mercurial import linelog
1274
1275
1275 opts = _byteskwargs(opts)
1276 opts = _byteskwargs(opts)
1276
1277
1277 edits = opts[b'edits']
1278 edits = opts[b'edits']
1278 maxhunklines = opts[b'max_hunk_lines']
1279 maxhunklines = opts[b'max_hunk_lines']
1279
1280
1280 maxb1 = 100000
1281 maxb1 = 100000
1281 random.seed(0)
1282 random.seed(0)
1282 randint = random.randint
1283 randint = random.randint
1283 currentlines = 0
1284 currentlines = 0
1284 arglist = []
1285 arglist = []
1285 for rev in _xrange(edits):
1286 for rev in _xrange(edits):
1286 a1 = randint(0, currentlines)
1287 a1 = randint(0, currentlines)
1287 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1288 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1288 b1 = randint(0, maxb1)
1289 b1 = randint(0, maxb1)
1289 b2 = randint(b1, b1 + maxhunklines)
1290 b2 = randint(b1, b1 + maxhunklines)
1290 currentlines += (b2 - b1) - (a2 - a1)
1291 currentlines += (b2 - b1) - (a2 - a1)
1291 arglist.append((rev, a1, a2, b1, b2))
1292 arglist.append((rev, a1, a2, b1, b2))
1292
1293
1293 def d():
1294 def d():
1294 ll = linelog.linelog()
1295 ll = linelog.linelog()
1295 for args in arglist:
1296 for args in arglist:
1296 ll.replacelines(*args)
1297 ll.replacelines(*args)
1297
1298
1298 timer, fm = gettimer(ui, opts)
1299 timer, fm = gettimer(ui, opts)
1299 timer(d)
1300 timer(d)
1300 fm.end()
1301 fm.end()
1301
1302
1302 @command(b'perfrevrange', formatteropts)
1303 @command(b'perfrevrange', formatteropts)
1303 def perfrevrange(ui, repo, *specs, **opts):
1304 def perfrevrange(ui, repo, *specs, **opts):
1304 opts = _byteskwargs(opts)
1305 opts = _byteskwargs(opts)
1305 timer, fm = gettimer(ui, opts)
1306 timer, fm = gettimer(ui, opts)
1306 revrange = scmutil.revrange
1307 revrange = scmutil.revrange
1307 timer(lambda: len(revrange(repo, specs)))
1308 timer(lambda: len(revrange(repo, specs)))
1308 fm.end()
1309 fm.end()
1309
1310
1310 @command(b'perfnodelookup', formatteropts)
1311 @command(b'perfnodelookup', formatteropts)
1311 def perfnodelookup(ui, repo, rev, **opts):
1312 def perfnodelookup(ui, repo, rev, **opts):
1312 opts = _byteskwargs(opts)
1313 opts = _byteskwargs(opts)
1313 timer, fm = gettimer(ui, opts)
1314 timer, fm = gettimer(ui, opts)
1314 import mercurial.revlog
1315 import mercurial.revlog
1315 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1316 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1316 n = scmutil.revsingle(repo, rev).node()
1317 n = scmutil.revsingle(repo, rev).node()
1317 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1318 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1318 def d():
1319 def d():
1319 cl.rev(n)
1320 cl.rev(n)
1320 clearcaches(cl)
1321 clearcaches(cl)
1321 timer(d)
1322 timer(d)
1322 fm.end()
1323 fm.end()
1323
1324
1324 @command(b'perflog',
1325 @command(b'perflog',
1325 [(b'', b'rename', False, b'ask log to follow renames')
1326 [(b'', b'rename', False, b'ask log to follow renames')
1326 ] + formatteropts)
1327 ] + formatteropts)
1327 def perflog(ui, repo, rev=None, **opts):
1328 def perflog(ui, repo, rev=None, **opts):
1328 opts = _byteskwargs(opts)
1329 opts = _byteskwargs(opts)
1329 if rev is None:
1330 if rev is None:
1330 rev=[]
1331 rev=[]
1331 timer, fm = gettimer(ui, opts)
1332 timer, fm = gettimer(ui, opts)
1332 ui.pushbuffer()
1333 ui.pushbuffer()
1333 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1334 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1334 copies=opts.get(b'rename')))
1335 copies=opts.get(b'rename')))
1335 ui.popbuffer()
1336 ui.popbuffer()
1336 fm.end()
1337 fm.end()
1337
1338
1338 @command(b'perfmoonwalk', formatteropts)
1339 @command(b'perfmoonwalk', formatteropts)
1339 def perfmoonwalk(ui, repo, **opts):
1340 def perfmoonwalk(ui, repo, **opts):
1340 """benchmark walking the changelog backwards
1341 """benchmark walking the changelog backwards
1341
1342
1342 This also loads the changelog data for each revision in the changelog.
1343 This also loads the changelog data for each revision in the changelog.
1343 """
1344 """
1344 opts = _byteskwargs(opts)
1345 opts = _byteskwargs(opts)
1345 timer, fm = gettimer(ui, opts)
1346 timer, fm = gettimer(ui, opts)
1346 def moonwalk():
1347 def moonwalk():
1347 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1348 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1348 ctx = repo[i]
1349 ctx = repo[i]
1349 ctx.branch() # read changelog data (in addition to the index)
1350 ctx.branch() # read changelog data (in addition to the index)
1350 timer(moonwalk)
1351 timer(moonwalk)
1351 fm.end()
1352 fm.end()
1352
1353
1353 @command(b'perftemplating',
1354 @command(b'perftemplating',
1354 [(b'r', b'rev', [], b'revisions to run the template on'),
1355 [(b'r', b'rev', [], b'revisions to run the template on'),
1355 ] + formatteropts)
1356 ] + formatteropts)
1356 def perftemplating(ui, repo, testedtemplate=None, **opts):
1357 def perftemplating(ui, repo, testedtemplate=None, **opts):
1357 """test the rendering time of a given template"""
1358 """test the rendering time of a given template"""
1358 if makelogtemplater is None:
1359 if makelogtemplater is None:
1359 raise error.Abort((b"perftemplating not available with this Mercurial"),
1360 raise error.Abort((b"perftemplating not available with this Mercurial"),
1360 hint=b"use 4.3 or later")
1361 hint=b"use 4.3 or later")
1361
1362
1362 opts = _byteskwargs(opts)
1363 opts = _byteskwargs(opts)
1363
1364
1364 nullui = ui.copy()
1365 nullui = ui.copy()
1365 nullui.fout = open(os.devnull, r'wb')
1366 nullui.fout = open(os.devnull, r'wb')
1366 nullui.disablepager()
1367 nullui.disablepager()
1367 revs = opts.get(b'rev')
1368 revs = opts.get(b'rev')
1368 if not revs:
1369 if not revs:
1369 revs = [b'all()']
1370 revs = [b'all()']
1370 revs = list(scmutil.revrange(repo, revs))
1371 revs = list(scmutil.revrange(repo, revs))
1371
1372
1372 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1373 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1373 b' {author|person}: {desc|firstline}\n')
1374 b' {author|person}: {desc|firstline}\n')
1374 if testedtemplate is None:
1375 if testedtemplate is None:
1375 testedtemplate = defaulttemplate
1376 testedtemplate = defaulttemplate
1376 displayer = makelogtemplater(nullui, repo, testedtemplate)
1377 displayer = makelogtemplater(nullui, repo, testedtemplate)
1377 def format():
1378 def format():
1378 for r in revs:
1379 for r in revs:
1379 ctx = repo[r]
1380 ctx = repo[r]
1380 displayer.show(ctx)
1381 displayer.show(ctx)
1381 displayer.flush(ctx)
1382 displayer.flush(ctx)
1382
1383
1383 timer, fm = gettimer(ui, opts)
1384 timer, fm = gettimer(ui, opts)
1384 timer(format)
1385 timer(format)
1385 fm.end()
1386 fm.end()
1386
1387
1387 @command(b'perfhelper-pathcopies', formatteropts +
1388 @command(b'perfhelper-pathcopies', formatteropts +
1388 [
1389 [
1389 (b'r', b'revs', [], b'restrict search to these revisions'),
1390 (b'r', b'revs', [], b'restrict search to these revisions'),
1390 (b'', b'timing', False, b'provides extra data (costly)'),
1391 (b'', b'timing', False, b'provides extra data (costly)'),
1391 ])
1392 ])
1392 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1393 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1393 """find statistic about potential parameters for the `perftracecopies`
1394 """find statistic about potential parameters for the `perftracecopies`
1394
1395
1395 This command find source-destination pair relevant for copytracing testing.
1396 This command find source-destination pair relevant for copytracing testing.
1396 It report value for some of the parameters that impact copy tracing time.
1397 It report value for some of the parameters that impact copy tracing time.
1397
1398
1398 If `--timing` is set, rename detection is run and the associated timing
1399 If `--timing` is set, rename detection is run and the associated timing
1399 will be reported. The extra details comes at the cost of a slower command
1400 will be reported. The extra details comes at the cost of a slower command
1400 execution.
1401 execution.
1401
1402
1402 Since the rename detection is only run once, other factors might easily
1403 Since the rename detection is only run once, other factors might easily
1403 affect the precision of the timing. However it should give a good
1404 affect the precision of the timing. However it should give a good
1404 approximation of which revision pairs are very costly.
1405 approximation of which revision pairs are very costly.
1405 """
1406 """
1406 opts = _byteskwargs(opts)
1407 opts = _byteskwargs(opts)
1407 fm = ui.formatter(b'perf', opts)
1408 fm = ui.formatter(b'perf', opts)
1408 dotiming = opts[b'timing']
1409 dotiming = opts[b'timing']
1409
1410
1410 if dotiming:
1411 if dotiming:
1411 header = '%12s %12s %12s %12s %12s %12s\n'
1412 header = '%12s %12s %12s %12s %12s %12s\n'
1412 output = ("%(source)12s %(destination)12s "
1413 output = ("%(source)12s %(destination)12s "
1413 "%(nbrevs)12d %(nbmissingfiles)12d "
1414 "%(nbrevs)12d %(nbmissingfiles)12d "
1414 "%(nbrenamedfiles)12d %(time)18.5f\n")
1415 "%(nbrenamedfiles)12d %(time)18.5f\n")
1415 header_names = ("source", "destination", "nb-revs", "nb-files",
1416 header_names = ("source", "destination", "nb-revs", "nb-files",
1416 "nb-renames", "time")
1417 "nb-renames", "time")
1417 fm.plain(header % header_names)
1418 fm.plain(header % header_names)
1418 else:
1419 else:
1419 header = '%12s %12s %12s %12s\n'
1420 header = '%12s %12s %12s %12s\n'
1420 output = ("%(source)12s %(destination)12s "
1421 output = ("%(source)12s %(destination)12s "
1421 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1422 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1422 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1423 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1423
1424
1424 if not revs:
1425 if not revs:
1425 revs = ['all()']
1426 revs = ['all()']
1426 revs = scmutil.revrange(repo, revs)
1427 revs = scmutil.revrange(repo, revs)
1427
1428
1428 roi = repo.revs('merge() and %ld', revs)
1429 roi = repo.revs('merge() and %ld', revs)
1429 for r in roi:
1430 for r in roi:
1430 ctx = repo[r]
1431 ctx = repo[r]
1431 p1 = ctx.p1().rev()
1432 p1 = ctx.p1().rev()
1432 p2 = ctx.p2().rev()
1433 p2 = ctx.p2().rev()
1433 bases = repo.changelog._commonancestorsheads(p1, p2)
1434 bases = repo.changelog._commonancestorsheads(p1, p2)
1434 for p in (p1, p2):
1435 for p in (p1, p2):
1435 for b in bases:
1436 for b in bases:
1436 base = repo[b]
1437 base = repo[b]
1437 parent = repo[p]
1438 parent = repo[p]
1438 missing = copies._computeforwardmissing(base, parent)
1439 missing = copies._computeforwardmissing(base, parent)
1439 if not missing:
1440 if not missing:
1440 continue
1441 continue
1441 data = {
1442 data = {
1442 b'source': base.hex(),
1443 b'source': base.hex(),
1443 b'destination': parent.hex(),
1444 b'destination': parent.hex(),
1444 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1445 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1445 b'nbmissingfiles': len(missing),
1446 b'nbmissingfiles': len(missing),
1446 }
1447 }
1447 if dotiming:
1448 if dotiming:
1448 begin = util.timer()
1449 begin = util.timer()
1449 renames = copies.pathcopies(base, parent)
1450 renames = copies.pathcopies(base, parent)
1450 end = util.timer()
1451 end = util.timer()
1451 # not very stable timing since we did only one run
1452 # not very stable timing since we did only one run
1452 data['time'] = end - begin
1453 data['time'] = end - begin
1453 data['nbrenamedfiles'] = len(renames)
1454 data['nbrenamedfiles'] = len(renames)
1454 fm.startitem()
1455 fm.startitem()
1455 fm.data(**data)
1456 fm.data(**data)
1456 out = data.copy()
1457 out = data.copy()
1457 out['source'] = fm.hexfunc(base.node())
1458 out['source'] = fm.hexfunc(base.node())
1458 out['destination'] = fm.hexfunc(parent.node())
1459 out['destination'] = fm.hexfunc(parent.node())
1459 fm.plain(output % out)
1460 fm.plain(output % out)
1460
1461
1461 fm.end()
1462 fm.end()
1462
1463
1463 @command(b'perfcca', formatteropts)
1464 @command(b'perfcca', formatteropts)
1464 def perfcca(ui, repo, **opts):
1465 def perfcca(ui, repo, **opts):
1465 opts = _byteskwargs(opts)
1466 opts = _byteskwargs(opts)
1466 timer, fm = gettimer(ui, opts)
1467 timer, fm = gettimer(ui, opts)
1467 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1468 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1468 fm.end()
1469 fm.end()
1469
1470
1470 @command(b'perffncacheload', formatteropts)
1471 @command(b'perffncacheload', formatteropts)
1471 def perffncacheload(ui, repo, **opts):
1472 def perffncacheload(ui, repo, **opts):
1472 opts = _byteskwargs(opts)
1473 opts = _byteskwargs(opts)
1473 timer, fm = gettimer(ui, opts)
1474 timer, fm = gettimer(ui, opts)
1474 s = repo.store
1475 s = repo.store
1475 def d():
1476 def d():
1476 s.fncache._load()
1477 s.fncache._load()
1477 timer(d)
1478 timer(d)
1478 fm.end()
1479 fm.end()
1479
1480
1480 @command(b'perffncachewrite', formatteropts)
1481 @command(b'perffncachewrite', formatteropts)
1481 def perffncachewrite(ui, repo, **opts):
1482 def perffncachewrite(ui, repo, **opts):
1482 opts = _byteskwargs(opts)
1483 opts = _byteskwargs(opts)
1483 timer, fm = gettimer(ui, opts)
1484 timer, fm = gettimer(ui, opts)
1484 s = repo.store
1485 s = repo.store
1485 lock = repo.lock()
1486 lock = repo.lock()
1486 s.fncache._load()
1487 s.fncache._load()
1487 tr = repo.transaction(b'perffncachewrite')
1488 tr = repo.transaction(b'perffncachewrite')
1488 tr.addbackup(b'fncache')
1489 tr.addbackup(b'fncache')
1489 def d():
1490 def d():
1490 s.fncache._dirty = True
1491 s.fncache._dirty = True
1491 s.fncache.write(tr)
1492 s.fncache.write(tr)
1492 timer(d)
1493 timer(d)
1493 tr.close()
1494 tr.close()
1494 lock.release()
1495 lock.release()
1495 fm.end()
1496 fm.end()
1496
1497
1497 @command(b'perffncacheencode', formatteropts)
1498 @command(b'perffncacheencode', formatteropts)
1498 def perffncacheencode(ui, repo, **opts):
1499 def perffncacheencode(ui, repo, **opts):
1499 opts = _byteskwargs(opts)
1500 opts = _byteskwargs(opts)
1500 timer, fm = gettimer(ui, opts)
1501 timer, fm = gettimer(ui, opts)
1501 s = repo.store
1502 s = repo.store
1502 s.fncache._load()
1503 s.fncache._load()
1503 def d():
1504 def d():
1504 for p in s.fncache.entries:
1505 for p in s.fncache.entries:
1505 s.encode(p)
1506 s.encode(p)
1506 timer(d)
1507 timer(d)
1507 fm.end()
1508 fm.end()
1508
1509
1509 def _bdiffworker(q, blocks, xdiff, ready, done):
1510 def _bdiffworker(q, blocks, xdiff, ready, done):
1510 while not done.is_set():
1511 while not done.is_set():
1511 pair = q.get()
1512 pair = q.get()
1512 while pair is not None:
1513 while pair is not None:
1513 if xdiff:
1514 if xdiff:
1514 mdiff.bdiff.xdiffblocks(*pair)
1515 mdiff.bdiff.xdiffblocks(*pair)
1515 elif blocks:
1516 elif blocks:
1516 mdiff.bdiff.blocks(*pair)
1517 mdiff.bdiff.blocks(*pair)
1517 else:
1518 else:
1518 mdiff.textdiff(*pair)
1519 mdiff.textdiff(*pair)
1519 q.task_done()
1520 q.task_done()
1520 pair = q.get()
1521 pair = q.get()
1521 q.task_done() # for the None one
1522 q.task_done() # for the None one
1522 with ready:
1523 with ready:
1523 ready.wait()
1524 ready.wait()
1524
1525
1525 def _manifestrevision(repo, mnode):
1526 def _manifestrevision(repo, mnode):
1526 ml = repo.manifestlog
1527 ml = repo.manifestlog
1527
1528
1528 if util.safehasattr(ml, b'getstorage'):
1529 if util.safehasattr(ml, b'getstorage'):
1529 store = ml.getstorage(b'')
1530 store = ml.getstorage(b'')
1530 else:
1531 else:
1531 store = ml._revlog
1532 store = ml._revlog
1532
1533
1533 return store.revision(mnode)
1534 return store.revision(mnode)
1534
1535
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff only changes which block-diff routine runs, so it is
    # meaningless without --blocks.
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata walks changesets, so it implies reading the changelog.
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # No file argument in this mode: the first positional is the revision.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # Pairs of (old text, new text) to diff; gathered up front so the timed
    # function measures only the diffing itself, not revision retrieval.
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the min(..., len(r) - 1) bound excludes the tip
    # revision from the sweep — confirm whether that is intentional.
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # change is ((oldnode, oldflags), (newnode, newflags));
                # a falsy node (file added/removed) maps to nullrev (-1).
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # Single-threaded: diff every pair inline with the selected engine.
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # Multi-threaded: hand pairs to _bdiffworker threads through a queue.
        q = queue()
        # Prime the queue with one sentinel per worker and join() so that
        # all worker threads are up and waiting before any timing starts.
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # Enqueue all work, then one None sentinel per worker so each
            # worker stops after draining; notify wakes workers parked on
            # the condition, and join() waits until all pairs are diffed.
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    # Tear down the worker threads: signal shutdown, then wake everyone.
    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # Walking every change of a changeset requires the changelog.
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # No file argument in this mode: first positional is the revision.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (left, right) text pairs collected up front so only diffing is timed.
    pairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    stop = min(startrev + count, len(rl) - 1)
    for rev in range(startrev, stop):
        if not opts[b'alldata']:
            baserev = rl.deltaparent(rev)
            pairs.append((rl.revision(baserev), rl.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pairs.append((_manifestrevision(repo, pctx.manifestnode()),
                          mtext))

        # Load filelog revisions by iterating manifest delta.
        mandiff = ctx.p1().manifest().diff(ctx.manifest())
        for filename, change in mandiff.items():
            flog = repo.file(filename)
            # A falsy node (file added/removed) maps to nullrev (-1).
            oldtext = flog.revision(change[0][0] or -1)
            newtext = flog.revision(change[1][0] or -1)
            pairs.append((oldtext, newtext))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Map each single-letter diff flag to the diff option it enables.
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # Benchmark the plain diff plus each whitespace-handling variant.
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffopts = {flagmap[flag]: b'1' for flag in flags}
        def d(diffopts=diffopts):
            # Buffer output so only the diff computation is measured.
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        flagbytes = flags.encode('ascii')
        title = b'diffopts: %s' % (flagbytes and (b'-' + flagbytes) or b'none')
        timer(d, title=title)
    fm.end()
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    # Raw index bytes, read once and reused by all benchmark closures below.
    data = opener.read(indexfile)

    # First 4 bytes: flags in the high 16 bits, version in the low 16.
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        # Bit 16 of the header flags marks an inline revlog (data
        # interleaved with the index).
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # Sample nodes at fixed points through the revlog for lookup benchmarks.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Cost of instantiating a revlog (lazy: no index parse yet).
        revlog.revlog(opener, indexfile)

    def read():
        # Cost of reading the raw index file from disk/cache.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # Cost of parsing the raw bytes into an index object.
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # Parse, then fetch a single index entry.
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # Parse once, then fetch every entry `count` times (to expose
        # caching effects on repeated passes).
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # node -> rev lookup through the nodemap.
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            # Missing nodes are an expected benchmark case.
            pass

    def resolvenodes(nodes, count=1):
        # Bulk node -> rev lookups, `count` passes over all nodes.
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
        ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        # A negative start revision counts back from the end.
        startrev = rllen + startrev

    def d():
        # Drop cached chunks so each timed run pays the full read cost.
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # Walk from tip down to (and excluding) startrev - 1.
            first, stop, step = rllen - 1, startrev - 1, -1 * step
        else:
            first, stop = startrev, rllen

        for pos in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # FIX: help text previously duplicated --stoprev's ("last revision
          # to write"); --count actually controls the number of timing runs.
          (b'', b'count', 3, b'number of timed runs to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # FIX: error message previously read "invalide run count".
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: [(rev, [timing-of-run-1, timing-of-run-2, ...]), ...],
    # grouping the per-run timings of each revision together.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # FIX: the 50% row previously computed `resultcount * 70 // 100`,
        # reporting the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1992 class _faketr(object):
1993 class _faketr(object):
1993 def add(s, x, y, z=None):
1994 def add(s, x, y, z=None):
1994 return None
1995 return None
1995
1996
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog, timing each ``addrawrevision`` call.

    ``source`` selects how each revision is fed (full text, parent delta,
    stored delta — see _getrevisionseed). ``runidx``, when set, is shown in
    the progress topic to distinguish repeated runs. Returns a list of
    (rev, timing) tuples.
    """
    timings = []
    # No-op transaction: we only want the write path, not journaling.
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                # Passing pos=None closes out the old-style progress bar.
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Build the (args, kwargs) for addrawrevision outside the
            # timed region so only the write itself is measured.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) needed to replay ``rev`` of ``orig``
    through ``addrawrevision``, according to the ``source`` strategy.

    Returns ``((text, tr, linkrev, p1, p2), {'node', 'flags', 'cachedelta'})``
    where exactly one of ``text``/``cachedelta`` is populated.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    linkrev = orig.linkrev(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # Hand over the full text; the target recomputes its own delta.
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to the first parent when there is no second one.
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # Reuse the delta exactly as stored in the source revlog.
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog ``orig`` truncated at ``truncaterev``.

    The index and data files are copied into a throw-away directory and
    truncated so that revisions >= ``truncaterev`` are missing; a fresh
    revlog is then instantiated on top of them. The temporary directory is
    removed when the context exits.
    """
    from mercurial import vfs as vfsmod

    # inline revlogs interleave index and data in a single file; truncating
    # them would need a different strategy
    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    srcindexpath = orig.opener.join(orig.indexfile)
    srcdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the revlog files into the temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        dstindexpath = os.path.join(tmpdir, 'revlog.i')
        dstdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(srcindexpath, dstindexpath)
        shutil.copyfile(srcdatapath, dstdatapath)

        # drop the revisions that the benchmark will re-add
        ui.debug('truncating data to be rewritten\n')
        with open(dstindexpath, 'ab') as idxfh:
            idxfh.seek(0)
            idxfh.truncate(truncaterev * orig._io.size)
        with open(dstdatapath, 'ab') as datafh:
            datafh.seek(0)
            datafh.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2118
2119
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # compat: _chunkraw was renamed to _getsegmentforrevs
    segmentforrevs = getattr(rl, '_getsegmentforrevs', None)
    if segmentforrevs is None:
        segmentforrevs = rl._chunkraw

    # Validate the --engines argument, or default to every usable engine.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for name in util.compengines:
            engine = util.compengines[name]
            try:
                if engine.available():
                    # probe that the engine actually supports revlog
                    # compression before benchmarking it
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(name)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # file object backing the revlog's stored data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, fresh file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # one segment read per revision, sharing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read covering all revisions
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each revision's chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect (reused by the compression benches).
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (doread, b'read'),
        (doreadcachedfh, b'read w/ reused fd'),
        (doreadbatch, b'read batch'),
        (doreadbatchcachedfh, b'read batch w/ reused fd'),
        (dochunk, b'chunk'),
        (dochunkbatch, b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2236
2237
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # compat: _chunkraw was renamed to _getsegmentforrevs
    segmentforrevs = getattr(r, '_getsegmentforrevs', None)
    if segmentforrevs is None:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment back into per-revision raw chunks,
        # mirroring what revlog does internally
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for segidx, seg in enumerate(chain):
            offset = start(seg[0])
            segdata = data[segidx]
            for rev in seg:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(segdata, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # compat: slicechunk moved into mercurial.revlogutils.deltas
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate product so each bench isolates one phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2372
2373
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop volatile caches so their build cost is included
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of instantiating a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2395
2396
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence.
    With positional ``names`` arguments, only the named sets/filters are
    benchmarked."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mktimefunc(compute, name):
        """Return a benchmark callable running ``compute(repo, name)``.

        Volatile caches (and optionally the obsstore) are invalidated
        before each run so every call measures a cold computation.
        """
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)
        return d

    # obsolescence related sets
    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(mktimefunc(obsolete.getrevs, name), title=name)

    # repoview filtered revision sets
    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(mktimefunc(repoview.filterrevs, name), title=name)
    fm.end()
2437
2438
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        view = repo if filtername is None else repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap, including subsets
                view._branchcaches.clear()
            else:
                # only drop this filter level's entry
                filtered.pop(filtername, None)
            view.branchmap()
        return d

    # order filters from smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            # pick a filter whose subset is already processed (or external)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()

    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable persistent branchmap reads/writes so only the in-memory
    # update is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2516
2517
2517 @command(b'perfbranchmapupdate', [
2518 @command(b'perfbranchmapupdate', [
2518 (b'', b'base', [], b'subset of revision to start from'),
2519 (b'', b'base', [], b'subset of revision to start from'),
2519 (b'', b'target', [], b'subset of revision to end with'),
2520 (b'', b'target', [], b'subset of revision to end with'),
2520 (b'', b'clear-caches', False, b'clear cache between each runs')
2521 (b'', b'clear-caches', False, b'clear cache between each runs')
2521 ] + formatteropts)
2522 ] + formatteropts)
2522 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2523 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2523 """benchmark branchmap update from for <base> revs to <target> revs
2524 """benchmark branchmap update from for <base> revs to <target> revs
2524
2525
2525 If `--clear-caches` is passed, the following items will be reset before
2526 If `--clear-caches` is passed, the following items will be reset before
2526 each update:
2527 each update:
2527 * the changelog instance and associated indexes
2528 * the changelog instance and associated indexes
2528 * the rev-branch-cache instance
2529 * the rev-branch-cache instance
2529
2530
2530 Examples:
2531 Examples:
2531
2532
2532 # update for the one last revision
2533 # update for the one last revision
2533 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2534 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2534
2535
2535 $ update for change coming with a new branch
2536 $ update for change coming with a new branch
2536 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2537 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2537 """
2538 """
2538 from mercurial import branchmap
2539 from mercurial import branchmap
2539 from mercurial import repoview
2540 from mercurial import repoview
2540 opts = _byteskwargs(opts)
2541 opts = _byteskwargs(opts)
2541 timer, fm = gettimer(ui, opts)
2542 timer, fm = gettimer(ui, opts)
2542 clearcaches = opts[b'clear_caches']
2543 clearcaches = opts[b'clear_caches']
2543 unfi = repo.unfiltered()
2544 unfi = repo.unfiltered()
2544 x = [None] # used to pass data between closure
2545 x = [None] # used to pass data between closure
2545
2546
2546 # we use a `list` here to avoid possible side effect from smartset
2547 # we use a `list` here to avoid possible side effect from smartset
2547 baserevs = list(scmutil.revrange(repo, base))
2548 baserevs = list(scmutil.revrange(repo, base))
2548 targetrevs = list(scmutil.revrange(repo, target))
2549 targetrevs = list(scmutil.revrange(repo, target))
2549 if not baserevs:
2550 if not baserevs:
2550 raise error.Abort(b'no revisions selected for --base')
2551 raise error.Abort(b'no revisions selected for --base')
2551 if not targetrevs:
2552 if not targetrevs:
2552 raise error.Abort(b'no revisions selected for --target')
2553 raise error.Abort(b'no revisions selected for --target')
2553
2554
2554 # make sure the target branchmap also contains the one in the base
2555 # make sure the target branchmap also contains the one in the base
2555 targetrevs = list(set(baserevs) | set(targetrevs))
2556 targetrevs = list(set(baserevs) | set(targetrevs))
2556 targetrevs.sort()
2557 targetrevs.sort()
2557
2558
2558 cl = repo.changelog
2559 cl = repo.changelog
2559 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2560 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2560 allbaserevs.sort()
2561 allbaserevs.sort()
2561 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2562 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2562
2563
2563 newrevs = list(alltargetrevs.difference(allbaserevs))
2564 newrevs = list(alltargetrevs.difference(allbaserevs))
2564 newrevs.sort()
2565 newrevs.sort()
2565
2566
2566 allrevs = frozenset(unfi.changelog.revs())
2567 allrevs = frozenset(unfi.changelog.revs())
2567 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2568 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2568 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2569 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2569
2570
2570 def basefilter(repo, visibilityexceptions=None):
2571 def basefilter(repo, visibilityexceptions=None):
2571 return basefilterrevs
2572 return basefilterrevs
2572
2573
2573 def targetfilter(repo, visibilityexceptions=None):
2574 def targetfilter(repo, visibilityexceptions=None):
2574 return targetfilterrevs
2575 return targetfilterrevs
2575
2576
2576 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2577 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2577 ui.status(msg % (len(allbaserevs), len(newrevs)))
2578 ui.status(msg % (len(allbaserevs), len(newrevs)))
2578 if targetfilterrevs:
2579 if targetfilterrevs:
2579 msg = b'(%d revisions still filtered)\n'
2580 msg = b'(%d revisions still filtered)\n'
2580 ui.status(msg % len(targetfilterrevs))
2581 ui.status(msg % len(targetfilterrevs))
2581
2582
2582 try:
2583 try:
2583 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2584 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2584 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2585 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2585
2586
2586 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2587 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2587 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2588 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2588
2589
2589 # try to find an existing branchmap to reuse
2590 # try to find an existing branchmap to reuse
2590 subsettable = getbranchmapsubsettable()
2591 subsettable = getbranchmapsubsettable()
2591 candidatefilter = subsettable.get(None)
2592 candidatefilter = subsettable.get(None)
2592 while candidatefilter is not None:
2593 while candidatefilter is not None:
2593 candidatebm = repo.filtered(candidatefilter).branchmap()
2594 candidatebm = repo.filtered(candidatefilter).branchmap()
2594 if candidatebm.validfor(baserepo):
2595 if candidatebm.validfor(baserepo):
2595 filtered = repoview.filterrevs(repo, candidatefilter)
2596 filtered = repoview.filterrevs(repo, candidatefilter)
2596 missing = [r for r in allbaserevs if r in filtered]
2597 missing = [r for r in allbaserevs if r in filtered]
2597 base = candidatebm.copy()
2598 base = candidatebm.copy()
2598 base.update(baserepo, missing)
2599 base.update(baserepo, missing)
2599 break
2600 break
2600 candidatefilter = subsettable.get(candidatefilter)
2601 candidatefilter = subsettable.get(candidatefilter)
2601 else:
2602 else:
2602 # no suitable subset where found
2603 # no suitable subset where found
2603 base = branchmap.branchcache()
2604 base = branchmap.branchcache()
2604 base.update(baserepo, allbaserevs)
2605 base.update(baserepo, allbaserevs)
2605
2606
2606 def setup():
2607 def setup():
2607 x[0] = base.copy()
2608 x[0] = base.copy()
2608 if clearcaches:
2609 if clearcaches:
2609 unfi._revbranchcache = None
2610 unfi._revbranchcache = None
2610 clearchangelog(repo)
2611 clearchangelog(repo)
2611
2612
2612 def bench():
2613 def bench():
2613 x[0].update(targetrepo, newrevs)
2614 x[0].update(targetrepo, newrevs)
2614
2615
2615 timer(bench, setup=setup)
2616 timer(bench, setup=setup)
2616 fm.end()
2617 fm.end()
2617 finally:
2618 finally:
2618 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2619 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2619 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2620 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2620
2621
2621 @command(b'perfbranchmapload', [
2622 @command(b'perfbranchmapload', [
2622 (b'f', b'filter', b'', b'Specify repoview filter'),
2623 (b'f', b'filter', b'', b'Specify repoview filter'),
2623 (b'', b'list', False, b'List brachmap filter caches'),
2624 (b'', b'list', False, b'List brachmap filter caches'),
2624 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2625 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2625
2626
2626 ] + formatteropts)
2627 ] + formatteropts)
2627 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2628 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2628 """benchmark reading the branchmap"""
2629 """benchmark reading the branchmap"""
2629 opts = _byteskwargs(opts)
2630 opts = _byteskwargs(opts)
2630 clearrevlogs = opts[b'clear_revlogs']
2631 clearrevlogs = opts[b'clear_revlogs']
2631
2632
2632 if list:
2633 if list:
2633 for name, kind, st in repo.cachevfs.readdir(stat=True):
2634 for name, kind, st in repo.cachevfs.readdir(stat=True):
2634 if name.startswith(b'branch2'):
2635 if name.startswith(b'branch2'):
2635 filtername = name.partition(b'-')[2] or b'unfiltered'
2636 filtername = name.partition(b'-')[2] or b'unfiltered'
2636 ui.status(b'%s - %s\n'
2637 ui.status(b'%s - %s\n'
2637 % (filtername, util.bytecount(st.st_size)))
2638 % (filtername, util.bytecount(st.st_size)))
2638 return
2639 return
2639 if not filter:
2640 if not filter:
2640 filter = None
2641 filter = None
2641 subsettable = getbranchmapsubsettable()
2642 subsettable = getbranchmapsubsettable()
2642 if filter is None:
2643 if filter is None:
2643 repo = repo.unfiltered()
2644 repo = repo.unfiltered()
2644 else:
2645 else:
2645 repo = repoview.repoview(repo, filter)
2646 repo = repoview.repoview(repo, filter)
2646
2647
2647 repo.branchmap() # make sure we have a relevant, up to date branchmap
2648 repo.branchmap() # make sure we have a relevant, up to date branchmap
2648
2649
2649 try:
2650 try:
2650 fromfile = branchmap.branchcache.fromfile
2651 fromfile = branchmap.branchcache.fromfile
2651 except AttributeError:
2652 except AttributeError:
2652 # older versions
2653 # older versions
2653 fromfile = branchmap.read
2654 fromfile = branchmap.read
2654
2655
2655 currentfilter = filter
2656 currentfilter = filter
2656 # try once without timer, the filter may not be cached
2657 # try once without timer, the filter may not be cached
2657 while fromfile(repo) is None:
2658 while fromfile(repo) is None:
2658 currentfilter = subsettable.get(currentfilter)
2659 currentfilter = subsettable.get(currentfilter)
2659 if currentfilter is None:
2660 if currentfilter is None:
2660 raise error.Abort(b'No branchmap cached for %s repo'
2661 raise error.Abort(b'No branchmap cached for %s repo'
2661 % (filter or b'unfiltered'))
2662 % (filter or b'unfiltered'))
2662 repo = repo.filtered(currentfilter)
2663 repo = repo.filtered(currentfilter)
2663 timer, fm = gettimer(ui, opts)
2664 timer, fm = gettimer(ui, opts)
2664 def setup():
2665 def setup():
2665 if clearrevlogs:
2666 if clearrevlogs:
2666 clearchangelog(repo)
2667 clearchangelog(repo)
2667 def bench():
2668 def bench():
2668 fromfile(repo)
2669 fromfile(repo)
2669 timer(bench, setup=setup)
2670 timer(bench, setup=setup)
2670 fm.end()
2671 fm.end()
2671
2672
2672 @command(b'perfloadmarkers')
2673 @command(b'perfloadmarkers')
2673 def perfloadmarkers(ui, repo):
2674 def perfloadmarkers(ui, repo):
2674 """benchmark the time to parse the on-disk markers for a repo
2675 """benchmark the time to parse the on-disk markers for a repo
2675
2676
2676 Result is the number of markers in the repo."""
2677 Result is the number of markers in the repo."""
2677 timer, fm = gettimer(ui)
2678 timer, fm = gettimer(ui)
2678 svfs = getsvfs(repo)
2679 svfs = getsvfs(repo)
2679 timer(lambda: len(obsolete.obsstore(svfs)))
2680 timer(lambda: len(obsolete.obsstore(svfs)))
2680 fm.end()
2681 fm.end()
2681
2682
2682 @command(b'perflrucachedict', formatteropts +
2683 @command(b'perflrucachedict', formatteropts +
2683 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2684 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2684 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2685 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2685 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2686 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2686 (b'', b'size', 4, b'size of cache'),
2687 (b'', b'size', 4, b'size of cache'),
2687 (b'', b'gets', 10000, b'number of key lookups'),
2688 (b'', b'gets', 10000, b'number of key lookups'),
2688 (b'', b'sets', 10000, b'number of key sets'),
2689 (b'', b'sets', 10000, b'number of key sets'),
2689 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2690 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2690 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2691 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2691 norepo=True)
2692 norepo=True)
2692 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2693 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2693 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2694 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2694 opts = _byteskwargs(opts)
2695 opts = _byteskwargs(opts)
2695
2696
2696 def doinit():
2697 def doinit():
2697 for i in _xrange(10000):
2698 for i in _xrange(10000):
2698 util.lrucachedict(size)
2699 util.lrucachedict(size)
2699
2700
2700 costrange = list(range(mincost, maxcost + 1))
2701 costrange = list(range(mincost, maxcost + 1))
2701
2702
2702 values = []
2703 values = []
2703 for i in _xrange(size):
2704 for i in _xrange(size):
2704 values.append(random.randint(0, _maxint))
2705 values.append(random.randint(0, _maxint))
2705
2706
2706 # Get mode fills the cache and tests raw lookup performance with no
2707 # Get mode fills the cache and tests raw lookup performance with no
2707 # eviction.
2708 # eviction.
2708 getseq = []
2709 getseq = []
2709 for i in _xrange(gets):
2710 for i in _xrange(gets):
2710 getseq.append(random.choice(values))
2711 getseq.append(random.choice(values))
2711
2712
2712 def dogets():
2713 def dogets():
2713 d = util.lrucachedict(size)
2714 d = util.lrucachedict(size)
2714 for v in values:
2715 for v in values:
2715 d[v] = v
2716 d[v] = v
2716 for key in getseq:
2717 for key in getseq:
2717 value = d[key]
2718 value = d[key]
2718 value # silence pyflakes warning
2719 value # silence pyflakes warning
2719
2720
2720 def dogetscost():
2721 def dogetscost():
2721 d = util.lrucachedict(size, maxcost=costlimit)
2722 d = util.lrucachedict(size, maxcost=costlimit)
2722 for i, v in enumerate(values):
2723 for i, v in enumerate(values):
2723 d.insert(v, v, cost=costs[i])
2724 d.insert(v, v, cost=costs[i])
2724 for key in getseq:
2725 for key in getseq:
2725 try:
2726 try:
2726 value = d[key]
2727 value = d[key]
2727 value # silence pyflakes warning
2728 value # silence pyflakes warning
2728 except KeyError:
2729 except KeyError:
2729 pass
2730 pass
2730
2731
2731 # Set mode tests insertion speed with cache eviction.
2732 # Set mode tests insertion speed with cache eviction.
2732 setseq = []
2733 setseq = []
2733 costs = []
2734 costs = []
2734 for i in _xrange(sets):
2735 for i in _xrange(sets):
2735 setseq.append(random.randint(0, _maxint))
2736 setseq.append(random.randint(0, _maxint))
2736 costs.append(random.choice(costrange))
2737 costs.append(random.choice(costrange))
2737
2738
2738 def doinserts():
2739 def doinserts():
2739 d = util.lrucachedict(size)
2740 d = util.lrucachedict(size)
2740 for v in setseq:
2741 for v in setseq:
2741 d.insert(v, v)
2742 d.insert(v, v)
2742
2743
2743 def doinsertscost():
2744 def doinsertscost():
2744 d = util.lrucachedict(size, maxcost=costlimit)
2745 d = util.lrucachedict(size, maxcost=costlimit)
2745 for i, v in enumerate(setseq):
2746 for i, v in enumerate(setseq):
2746 d.insert(v, v, cost=costs[i])
2747 d.insert(v, v, cost=costs[i])
2747
2748
2748 def dosets():
2749 def dosets():
2749 d = util.lrucachedict(size)
2750 d = util.lrucachedict(size)
2750 for v in setseq:
2751 for v in setseq:
2751 d[v] = v
2752 d[v] = v
2752
2753
2753 # Mixed mode randomly performs gets and sets with eviction.
2754 # Mixed mode randomly performs gets and sets with eviction.
2754 mixedops = []
2755 mixedops = []
2755 for i in _xrange(mixed):
2756 for i in _xrange(mixed):
2756 r = random.randint(0, 100)
2757 r = random.randint(0, 100)
2757 if r < mixedgetfreq:
2758 if r < mixedgetfreq:
2758 op = 0
2759 op = 0
2759 else:
2760 else:
2760 op = 1
2761 op = 1
2761
2762
2762 mixedops.append((op,
2763 mixedops.append((op,
2763 random.randint(0, size * 2),
2764 random.randint(0, size * 2),
2764 random.choice(costrange)))
2765 random.choice(costrange)))
2765
2766
2766 def domixed():
2767 def domixed():
2767 d = util.lrucachedict(size)
2768 d = util.lrucachedict(size)
2768
2769
2769 for op, v, cost in mixedops:
2770 for op, v, cost in mixedops:
2770 if op == 0:
2771 if op == 0:
2771 try:
2772 try:
2772 d[v]
2773 d[v]
2773 except KeyError:
2774 except KeyError:
2774 pass
2775 pass
2775 else:
2776 else:
2776 d[v] = v
2777 d[v] = v
2777
2778
2778 def domixedcost():
2779 def domixedcost():
2779 d = util.lrucachedict(size, maxcost=costlimit)
2780 d = util.lrucachedict(size, maxcost=costlimit)
2780
2781
2781 for op, v, cost in mixedops:
2782 for op, v, cost in mixedops:
2782 if op == 0:
2783 if op == 0:
2783 try:
2784 try:
2784 d[v]
2785 d[v]
2785 except KeyError:
2786 except KeyError:
2786 pass
2787 pass
2787 else:
2788 else:
2788 d.insert(v, v, cost=cost)
2789 d.insert(v, v, cost=cost)
2789
2790
2790 benches = [
2791 benches = [
2791 (doinit, b'init'),
2792 (doinit, b'init'),
2792 ]
2793 ]
2793
2794
2794 if costlimit:
2795 if costlimit:
2795 benches.extend([
2796 benches.extend([
2796 (dogetscost, b'gets w/ cost limit'),
2797 (dogetscost, b'gets w/ cost limit'),
2797 (doinsertscost, b'inserts w/ cost limit'),
2798 (doinsertscost, b'inserts w/ cost limit'),
2798 (domixedcost, b'mixed w/ cost limit'),
2799 (domixedcost, b'mixed w/ cost limit'),
2799 ])
2800 ])
2800 else:
2801 else:
2801 benches.extend([
2802 benches.extend([
2802 (dogets, b'gets'),
2803 (dogets, b'gets'),
2803 (doinserts, b'inserts'),
2804 (doinserts, b'inserts'),
2804 (dosets, b'sets'),
2805 (dosets, b'sets'),
2805 (domixed, b'mixed')
2806 (domixed, b'mixed')
2806 ])
2807 ])
2807
2808
2808 for fn, title in benches:
2809 for fn, title in benches:
2809 timer, fm = gettimer(ui, opts)
2810 timer, fm = gettimer(ui, opts)
2810 timer(fn, title=title)
2811 timer(fn, title=title)
2811 fm.end()
2812 fm.end()
2812
2813
2813 @command(b'perfwrite', formatteropts)
2814 @command(b'perfwrite', formatteropts)
2814 def perfwrite(ui, repo, **opts):
2815 def perfwrite(ui, repo, **opts):
2815 """microbenchmark ui.write
2816 """microbenchmark ui.write
2816 """
2817 """
2817 opts = _byteskwargs(opts)
2818 opts = _byteskwargs(opts)
2818
2819
2819 timer, fm = gettimer(ui, opts)
2820 timer, fm = gettimer(ui, opts)
2820 def write():
2821 def write():
2821 for i in range(100000):
2822 for i in range(100000):
2822 ui.write((b'Testing write performance\n'))
2823 ui.write((b'Testing write performance\n'))
2823 timer(write)
2824 timer(write)
2824 fm.end()
2825 fm.end()
2825
2826
2826 def uisetup(ui):
2827 def uisetup(ui):
2827 if (util.safehasattr(cmdutil, b'openrevlog') and
2828 if (util.safehasattr(cmdutil, b'openrevlog') and
2828 not util.safehasattr(commands, b'debugrevlogopts')):
2829 not util.safehasattr(commands, b'debugrevlogopts')):
2829 # for "historical portability":
2830 # for "historical portability":
2830 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2831 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2831 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2832 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2832 # openrevlog() should cause failure, because it has been
2833 # openrevlog() should cause failure, because it has been
2833 # available since 3.5 (or 49c583ca48c4).
2834 # available since 3.5 (or 49c583ca48c4).
2834 def openrevlog(orig, repo, cmd, file_, opts):
2835 def openrevlog(orig, repo, cmd, file_, opts):
2835 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2836 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2836 raise error.Abort(b"This version doesn't support --dir option",
2837 raise error.Abort(b"This version doesn't support --dir option",
2837 hint=b"use 3.5 or later")
2838 hint=b"use 3.5 or later")
2838 return orig(repo, cmd, file_, opts)
2839 return orig(repo, cmd, file_, opts)
2839 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2840 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2840
2841
2841 @command(b'perfprogress', formatteropts + [
2842 @command(b'perfprogress', formatteropts + [
2842 (b'', b'topic', b'topic', b'topic for progress messages'),
2843 (b'', b'topic', b'topic', b'topic for progress messages'),
2843 (b'c', b'total', 1000000, b'total value we are progressing to'),
2844 (b'c', b'total', 1000000, b'total value we are progressing to'),
2844 ], norepo=True)
2845 ], norepo=True)
2845 def perfprogress(ui, topic=None, total=None, **opts):
2846 def perfprogress(ui, topic=None, total=None, **opts):
2846 """printing of progress bars"""
2847 """printing of progress bars"""
2847 opts = _byteskwargs(opts)
2848 opts = _byteskwargs(opts)
2848
2849
2849 timer, fm = gettimer(ui, opts)
2850 timer, fm = gettimer(ui, opts)
2850
2851
2851 def doprogress():
2852 def doprogress():
2852 with ui.makeprogress(topic, total=total) as progress:
2853 with ui.makeprogress(topic, total=total) as progress:
2853 for i in pycompat.xrange(total):
2854 for i in pycompat.xrange(total):
2854 progress.increment()
2855 progress.increment()
2855
2856
2856 timer(doprogress)
2857 timer(doprogress)
2857 fm.end()
2858 fm.end()
@@ -1,355 +1,355 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistic will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of run (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "run-limits"
58 "run-limits"
59 Control the number of run each benchmark will perform. The option value
59 Control the number of runs each benchmark will perform. The option value
60 should be a list of '<time>-<numberofrun>' pairs. After each run the
60 should be a list of '<time>-<numberofrun>' pairs. After each run the
61 condition are considered in order with the following logic:
61 conditions are considered in order with the following logic:
62
62
63 If benchmark have been running for <time> seconds, and we have performed
63 If benchmark has been running for <time> seconds, and we have performed
64 <numberofrun> iterations, stop the benchmark,
64 <numberofrun> iterations, stop the benchmark,
65
65
66 The default value is: '3.0-100, 10.0-3'
66 The default value is: '3.0-100, 10.0-3'
67
67
68 "stub"
68 "stub"
69 When set, benchmark will only be run once, useful for testing (default:
69 When set, benchmarks will only be run once, useful for testing (default:
70 off)
70 off)
71
71
72 list of commands:
72 list of commands:
73
73
74 perfaddremove
74 perfaddremove
75 (no help text available)
75 (no help text available)
76 perfancestors
76 perfancestors
77 (no help text available)
77 (no help text available)
78 perfancestorset
78 perfancestorset
79 (no help text available)
79 (no help text available)
80 perfannotate (no help text available)
80 perfannotate (no help text available)
81 perfbdiff benchmark a bdiff between revisions
81 perfbdiff benchmark a bdiff between revisions
82 perfbookmarks
82 perfbookmarks
83 benchmark parsing bookmarks from disk to memory
83 benchmark parsing bookmarks from disk to memory
84 perfbranchmap
84 perfbranchmap
85 benchmark the update of a branchmap
85 benchmark the update of a branchmap
86 perfbranchmapload
86 perfbranchmapload
87 benchmark reading the branchmap
87 benchmark reading the branchmap
88 perfbranchmapupdate
88 perfbranchmapupdate
89 benchmark branchmap update from for <base> revs to <target>
89 benchmark branchmap update from for <base> revs to <target>
90 revs
90 revs
91 perfbundleread
91 perfbundleread
92 Benchmark reading of bundle files.
92 Benchmark reading of bundle files.
93 perfcca (no help text available)
93 perfcca (no help text available)
94 perfchangegroupchangelog
94 perfchangegroupchangelog
95 Benchmark producing a changelog group for a changegroup.
95 Benchmark producing a changelog group for a changegroup.
96 perfchangeset
96 perfchangeset
97 (no help text available)
97 (no help text available)
98 perfctxfiles (no help text available)
98 perfctxfiles (no help text available)
99 perfdiffwd Profile diff of working directory changes
99 perfdiffwd Profile diff of working directory changes
100 perfdirfoldmap
100 perfdirfoldmap
101 (no help text available)
101 (no help text available)
102 perfdirs (no help text available)
102 perfdirs (no help text available)
103 perfdirstate (no help text available)
103 perfdirstate (no help text available)
104 perfdirstatedirs
104 perfdirstatedirs
105 (no help text available)
105 (no help text available)
106 perfdirstatefoldmap
106 perfdirstatefoldmap
107 (no help text available)
107 (no help text available)
108 perfdirstatewrite
108 perfdirstatewrite
109 (no help text available)
109 (no help text available)
110 perfdiscovery
110 perfdiscovery
111 benchmark discovery between local repo and the peer at given
111 benchmark discovery between local repo and the peer at given
112 path
112 path
113 perffncacheencode
113 perffncacheencode
114 (no help text available)
114 (no help text available)
115 perffncacheload
115 perffncacheload
116 (no help text available)
116 (no help text available)
117 perffncachewrite
117 perffncachewrite
118 (no help text available)
118 (no help text available)
119 perfheads benchmark the computation of a changelog heads
119 perfheads benchmark the computation of a changelog heads
120 perfhelper-pathcopies
120 perfhelper-pathcopies
121 find statistic about potential parameters for the
121 find statistic about potential parameters for the
122 'perftracecopies'
122 'perftracecopies'
123 perfignore benchmark operation related to computing ignore
123 perfignore benchmark operation related to computing ignore
124 perfindex benchmark index creation time followed by a lookup
124 perfindex benchmark index creation time followed by a lookup
125 perflinelogedits
125 perflinelogedits
126 (no help text available)
126 (no help text available)
127 perfloadmarkers
127 perfloadmarkers
128 benchmark the time to parse the on-disk markers for a repo
128 benchmark the time to parse the on-disk markers for a repo
129 perflog (no help text available)
129 perflog (no help text available)
130 perflookup (no help text available)
130 perflookup (no help text available)
131 perflrucachedict
131 perflrucachedict
132 (no help text available)
132 (no help text available)
133 perfmanifest benchmark the time to read a manifest from disk and return a
133 perfmanifest benchmark the time to read a manifest from disk and return a
134 usable
134 usable
135 perfmergecalculate
135 perfmergecalculate
136 (no help text available)
136 (no help text available)
137 perfmoonwalk benchmark walking the changelog backwards
137 perfmoonwalk benchmark walking the changelog backwards
138 perfnodelookup
138 perfnodelookup
139 (no help text available)
139 (no help text available)
140 perfnodemap benchmark the time necessary to look up revision from a cold
140 perfnodemap benchmark the time necessary to look up revision from a cold
141 nodemap
141 nodemap
142 perfparents benchmark the time necessary to fetch one changeset's parents.
142 perfparents benchmark the time necessary to fetch one changeset's parents.
143 perfpathcopies
143 perfpathcopies
144 benchmark the copy tracing logic
144 benchmark the copy tracing logic
145 perfphases benchmark phasesets computation
145 perfphases benchmark phasesets computation
146 perfphasesremote
146 perfphasesremote
147 benchmark time needed to analyse phases of the remote server
147 benchmark time needed to analyse phases of the remote server
148 perfprogress printing of progress bars
148 perfprogress printing of progress bars
149 perfrawfiles (no help text available)
149 perfrawfiles (no help text available)
150 perfrevlogchunks
150 perfrevlogchunks
151 Benchmark operations on revlog chunks.
151 Benchmark operations on revlog chunks.
152 perfrevlogindex
152 perfrevlogindex
153 Benchmark operations against a revlog index.
153 Benchmark operations against a revlog index.
154 perfrevlogrevision
154 perfrevlogrevision
155 Benchmark obtaining a revlog revision.
155 Benchmark obtaining a revlog revision.
156 perfrevlogrevisions
156 perfrevlogrevisions
157 Benchmark reading a series of revisions from a revlog.
157 Benchmark reading a series of revisions from a revlog.
158 perfrevlogwrite
158 perfrevlogwrite
159 Benchmark writing a series of revisions to a revlog.
159 Benchmark writing a series of revisions to a revlog.
160 perfrevrange (no help text available)
160 perfrevrange (no help text available)
161 perfrevset benchmark the execution time of a revset
161 perfrevset benchmark the execution time of a revset
162 perfstartup (no help text available)
162 perfstartup (no help text available)
163 perfstatus (no help text available)
163 perfstatus (no help text available)
164 perftags (no help text available)
164 perftags (no help text available)
165 perftemplating
165 perftemplating
166 test the rendering time of a given template
166 test the rendering time of a given template
167 perfunidiff benchmark a unified diff between revisions
167 perfunidiff benchmark a unified diff between revisions
168 perfvolatilesets
168 perfvolatilesets
169 benchmark the computation of various volatile set
169 benchmark the computation of various volatile set
170 perfwalk (no help text available)
170 perfwalk (no help text available)
171 perfwrite microbenchmark ui.write
171 perfwrite microbenchmark ui.write
172
172
173 (use 'hg help -v perf' to show built-in aliases and global options)
173 (use 'hg help -v perf' to show built-in aliases and global options)
174 $ hg perfaddremove
174 $ hg perfaddremove
175 $ hg perfancestors
175 $ hg perfancestors
176 $ hg perfancestorset 2
176 $ hg perfancestorset 2
177 $ hg perfannotate a
177 $ hg perfannotate a
178 $ hg perfbdiff -c 1
178 $ hg perfbdiff -c 1
179 $ hg perfbdiff --alldata 1
179 $ hg perfbdiff --alldata 1
180 $ hg perfunidiff -c 1
180 $ hg perfunidiff -c 1
181 $ hg perfunidiff --alldata 1
181 $ hg perfunidiff --alldata 1
182 $ hg perfbookmarks
182 $ hg perfbookmarks
183 $ hg perfbranchmap
183 $ hg perfbranchmap
184 $ hg perfbranchmapload
184 $ hg perfbranchmapload
185 $ hg perfbranchmapupdate --base "not tip" --target "tip"
185 $ hg perfbranchmapupdate --base "not tip" --target "tip"
186 benchmark of branchmap with 3 revisions with 1 new ones
186 benchmark of branchmap with 3 revisions with 1 new ones
187 $ hg perfcca
187 $ hg perfcca
188 $ hg perfchangegroupchangelog
188 $ hg perfchangegroupchangelog
189 $ hg perfchangegroupchangelog --cgversion 01
189 $ hg perfchangegroupchangelog --cgversion 01
190 $ hg perfchangeset 2
190 $ hg perfchangeset 2
191 $ hg perfctxfiles 2
191 $ hg perfctxfiles 2
192 $ hg perfdiffwd
192 $ hg perfdiffwd
193 $ hg perfdirfoldmap
193 $ hg perfdirfoldmap
194 $ hg perfdirs
194 $ hg perfdirs
195 $ hg perfdirstate
195 $ hg perfdirstate
196 $ hg perfdirstatedirs
196 $ hg perfdirstatedirs
197 $ hg perfdirstatefoldmap
197 $ hg perfdirstatefoldmap
198 $ hg perfdirstatewrite
198 $ hg perfdirstatewrite
199 #if repofncache
199 #if repofncache
200 $ hg perffncacheencode
200 $ hg perffncacheencode
201 $ hg perffncacheload
201 $ hg perffncacheload
202 $ hg debugrebuildfncache
202 $ hg debugrebuildfncache
203 fncache already up to date
203 fncache already up to date
204 $ hg perffncachewrite
204 $ hg perffncachewrite
205 $ hg debugrebuildfncache
205 $ hg debugrebuildfncache
206 fncache already up to date
206 fncache already up to date
207 #endif
207 #endif
208 $ hg perfheads
208 $ hg perfheads
209 $ hg perfignore
209 $ hg perfignore
210 $ hg perfindex
210 $ hg perfindex
211 $ hg perflinelogedits -n 1
211 $ hg perflinelogedits -n 1
212 $ hg perfloadmarkers
212 $ hg perfloadmarkers
213 $ hg perflog
213 $ hg perflog
214 $ hg perflookup 2
214 $ hg perflookup 2
215 $ hg perflrucache
215 $ hg perflrucache
216 $ hg perfmanifest 2
216 $ hg perfmanifest 2
217 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
217 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
218 $ hg perfmanifest -m 44fe2c8352bb
218 $ hg perfmanifest -m 44fe2c8352bb
219 abort: manifest revision must be integer or full node
219 abort: manifest revision must be integer or full node
220 [255]
220 [255]
221 $ hg perfmergecalculate -r 3
221 $ hg perfmergecalculate -r 3
222 $ hg perfmoonwalk
222 $ hg perfmoonwalk
223 $ hg perfnodelookup 2
223 $ hg perfnodelookup 2
224 $ hg perfpathcopies 1 2
224 $ hg perfpathcopies 1 2
225 $ hg perfprogress --total 1000
225 $ hg perfprogress --total 1000
226 $ hg perfrawfiles 2
226 $ hg perfrawfiles 2
227 $ hg perfrevlogindex -c
227 $ hg perfrevlogindex -c
228 #if reporevlogstore
228 #if reporevlogstore
229 $ hg perfrevlogrevisions .hg/store/data/a.i
229 $ hg perfrevlogrevisions .hg/store/data/a.i
230 #endif
230 #endif
231 $ hg perfrevlogrevision -m 0
231 $ hg perfrevlogrevision -m 0
232 $ hg perfrevlogchunks -c
232 $ hg perfrevlogchunks -c
233 $ hg perfrevrange
233 $ hg perfrevrange
234 $ hg perfrevset 'all()'
234 $ hg perfrevset 'all()'
235 $ hg perfstartup
235 $ hg perfstartup
236 $ hg perfstatus
236 $ hg perfstatus
237 $ hg perftags
237 $ hg perftags
238 $ hg perftemplating
238 $ hg perftemplating
239 $ hg perfvolatilesets
239 $ hg perfvolatilesets
240 $ hg perfwalk
240 $ hg perfwalk
241 $ hg perfparents
241 $ hg perfparents
242 $ hg perfdiscovery -q .
242 $ hg perfdiscovery -q .
243
243
244 Test run control
244 Test run control
245 ----------------
245 ----------------
246
246
247 Simple single entry
247 Simple single entry
248
248
249 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
249 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
250 ! wall * comb * user * sys * (best of 15) (glob)
250 ! wall * comb * user * sys * (best of 15) (glob)
251
251
252 Multiple entries
252 Multiple entries
253
253
254 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
254 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
255 ! wall * comb * user * sys * (best of 5) (glob)
255 ! wall * comb * user * sys * (best of 5) (glob)
256
256
257 error cases are ignored
257 error cases are ignored
258
258
259 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
259 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
260 malformatted run limit entry, missing "-": 500
260 malformatted run limit entry, missing "-": 500
261 ! wall * comb * user * sys * (best of 5) (glob)
261 ! wall * comb * user * sys * (best of 5) (glob)
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12
264 ! wall * comb * user * sys * (best of 5) (glob)
264 ! wall * comb * user * sys * (best of 5) (glob)
265 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
265 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
266 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
266 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
267 ! wall * comb * user * sys * (best of 5) (glob)
267 ! wall * comb * user * sys * (best of 5) (glob)
268
268
269 test actual output
269 test actual output
270 ------------------
270 ------------------
271
271
272 normal output:
272 normal output:
273
273
274 $ hg perfheads --config perf.stub=no
274 $ hg perfheads --config perf.stub=no
275 ! wall * comb * user * sys * (best of *) (glob)
275 ! wall * comb * user * sys * (best of *) (glob)
276
276
277 detailed output:
277 detailed output:
278
278
279 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
279 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
280 ! wall * comb * user * sys * (best of *) (glob)
280 ! wall * comb * user * sys * (best of *) (glob)
281 ! wall * comb * user * sys * (max of *) (glob)
281 ! wall * comb * user * sys * (max of *) (glob)
282 ! wall * comb * user * sys * (avg of *) (glob)
282 ! wall * comb * user * sys * (avg of *) (glob)
283 ! wall * comb * user * sys * (median of *) (glob)
283 ! wall * comb * user * sys * (median of *) (glob)
284
284
285 test json output
285 test json output
286 ----------------
286 ----------------
287
287
288 normal output:
288 normal output:
289
289
290 $ hg perfheads --template json --config perf.stub=no
290 $ hg perfheads --template json --config perf.stub=no
291 [
291 [
292 {
292 {
293 "comb": *, (glob)
293 "comb": *, (glob)
294 "count": *, (glob)
294 "count": *, (glob)
295 "sys": *, (glob)
295 "sys": *, (glob)
296 "user": *, (glob)
296 "user": *, (glob)
297 "wall": * (glob)
297 "wall": * (glob)
298 }
298 }
299 ]
299 ]
300
300
301 detailed output:
301 detailed output:
302
302
303 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
303 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
304 [
304 [
305 {
305 {
306 "avg.comb": *, (glob)
306 "avg.comb": *, (glob)
307 "avg.count": *, (glob)
307 "avg.count": *, (glob)
308 "avg.sys": *, (glob)
308 "avg.sys": *, (glob)
309 "avg.user": *, (glob)
309 "avg.user": *, (glob)
310 "avg.wall": *, (glob)
310 "avg.wall": *, (glob)
311 "comb": *, (glob)
311 "comb": *, (glob)
312 "count": *, (glob)
312 "count": *, (glob)
313 "max.comb": *, (glob)
313 "max.comb": *, (glob)
314 "max.count": *, (glob)
314 "max.count": *, (glob)
315 "max.sys": *, (glob)
315 "max.sys": *, (glob)
316 "max.user": *, (glob)
316 "max.user": *, (glob)
317 "max.wall": *, (glob)
317 "max.wall": *, (glob)
318 "median.comb": *, (glob)
318 "median.comb": *, (glob)
319 "median.count": *, (glob)
319 "median.count": *, (glob)
320 "median.sys": *, (glob)
320 "median.sys": *, (glob)
321 "median.user": *, (glob)
321 "median.user": *, (glob)
322 "median.wall": *, (glob)
322 "median.wall": *, (glob)
323 "sys": *, (glob)
323 "sys": *, (glob)
324 "user": *, (glob)
324 "user": *, (glob)
325 "wall": * (glob)
325 "wall": * (glob)
326 }
326 }
327 ]
327 ]
328
328
329 Check perf.py for historical portability
329 Check perf.py for historical portability
330 ----------------------------------------
330 ----------------------------------------
331
331
332 $ cd "$TESTDIR/.."
332 $ cd "$TESTDIR/.."
333
333
334 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
334 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
335 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
335 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
336 > "$TESTDIR"/check-perf-code.py contrib/perf.py
336 > "$TESTDIR"/check-perf-code.py contrib/perf.py
337 contrib/perf.py:\d+: (re)
337 contrib/perf.py:\d+: (re)
338 > from mercurial import (
338 > from mercurial import (
339 import newer module separately in try clause for early Mercurial
339 import newer module separately in try clause for early Mercurial
340 contrib/perf.py:\d+: (re)
340 contrib/perf.py:\d+: (re)
341 > from mercurial import (
341 > from mercurial import (
342 import newer module separately in try clause for early Mercurial
342 import newer module separately in try clause for early Mercurial
343 contrib/perf.py:\d+: (re)
343 contrib/perf.py:\d+: (re)
344 > origindexpath = orig.opener.join(orig.indexfile)
344 > origindexpath = orig.opener.join(orig.indexfile)
345 use getvfs()/getsvfs() for early Mercurial
345 use getvfs()/getsvfs() for early Mercurial
346 contrib/perf.py:\d+: (re)
346 contrib/perf.py:\d+: (re)
347 > origdatapath = orig.opener.join(orig.datafile)
347 > origdatapath = orig.opener.join(orig.datafile)
348 use getvfs()/getsvfs() for early Mercurial
348 use getvfs()/getsvfs() for early Mercurial
349 contrib/perf.py:\d+: (re)
349 contrib/perf.py:\d+: (re)
350 > vfs = vfsmod.vfs(tmpdir)
350 > vfs = vfsmod.vfs(tmpdir)
351 use getvfs()/getsvfs() for early Mercurial
351 use getvfs()/getsvfs() for early Mercurial
352 contrib/perf.py:\d+: (re)
352 contrib/perf.py:\d+: (re)
353 > vfs.options = getattr(orig.opener, 'options', None)
353 > vfs.options = getattr(orig.opener, 'options', None)
354 use getvfs()/getsvfs() for early Mercurial
354 use getvfs()/getsvfs() for early Mercurial
355 [1]
355 [1]
General Comments 0
You need to be logged in to leave comments. Login now