perf: introduce a `perf.run-limits` option...
marmoute
r42186:5a1e621b default
@@ -1,2817 +1,2857 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median, average. If not set, only the best timing is reported
12 worst, median, average. If not set, only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``run-limits``
19 Control the number of runs each benchmark will perform. The option value
20 should be a list of `<time>-<numberofrun>` pairs. After each run the
21 conditions are considered in order with the following logic:
22
23 If the benchmark has been running for <time> seconds and we have performed
24 <numberofrun> iterations, stop the benchmark.
25
26 The default value is: `3.0-100, 10.0-3`
27
18 ``stub``
28 ``stub``
19 When set, benchmarks will only be run once; useful for testing (default: off)
29 When set, benchmarks will only be run once; useful for testing (default: off)
20 '''
30 '''
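The `run-limits` format documented above can be illustrated with a small standalone sketch (not part of the patch; the function name and spec string are made up for illustration). It parses a spec the same way `gettimer()` below does and prints the resulting stop conditions: a benchmark stops as soon as, for any pair, at least <time> seconds have elapsed and at least <numberofrun> runs have completed.

# Illustrative sketch only -- not part of perf.py. Parses a perf.run-limits
# value into (seconds, iterations) pairs, mirroring gettimer() below.
def parse_run_limits(spec):
    limits = []
    for item in spec.split(','):
        time_part, _, count_part = item.strip().partition('-')
        limits.append((float(time_part), int(count_part)))
    return limits

print(parse_run_limits('3.0-100, 10.0-3'))   # [(3.0, 100), (10.0, 3)]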
21
31
22 # "historical portability" policy of perf.py:
32 # "historical portability" policy of perf.py:
23 #
33 #
24 # We have to do:
34 # We have to do:
25 # - make perf.py "loadable" with as wide Mercurial version as possible
35 # - make perf.py "loadable" with as wide Mercurial version as possible
26 # This doesn't mean that perf commands work correctly with that Mercurial.
36 # This doesn't mean that perf commands work correctly with that Mercurial.
27 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
37 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
28 # - make historical perf command work correctly with as wide Mercurial
38 # - make historical perf command work correctly with as wide Mercurial
29 # version as possible
39 # version as possible
30 #
40 #
31 # We have to do, if possible with reasonable cost:
41 # We have to do, if possible with reasonable cost:
32 # - make recent perf command for historical feature work correctly
42 # - make recent perf command for historical feature work correctly
33 # with early Mercurial
43 # with early Mercurial
34 #
44 #
35 # We don't have to do:
45 # We don't have to do:
36 # - make perf command for recent feature work correctly with early
46 # - make perf command for recent feature work correctly with early
37 # Mercurial
47 # Mercurial
38
48
39 from __future__ import absolute_import
49 from __future__ import absolute_import
40 import contextlib
50 import contextlib
41 import functools
51 import functools
42 import gc
52 import gc
43 import os
53 import os
44 import random
54 import random
45 import shutil
55 import shutil
46 import struct
56 import struct
47 import sys
57 import sys
48 import tempfile
58 import tempfile
49 import threading
59 import threading
50 import time
60 import time
51 from mercurial import (
61 from mercurial import (
52 changegroup,
62 changegroup,
53 cmdutil,
63 cmdutil,
54 commands,
64 commands,
55 copies,
65 copies,
56 error,
66 error,
57 extensions,
67 extensions,
58 hg,
68 hg,
59 mdiff,
69 mdiff,
60 merge,
70 merge,
61 revlog,
71 revlog,
62 util,
72 util,
63 )
73 )
64
74
65 # for "historical portability":
75 # for "historical portability":
66 # try to import modules separately (in dict order), and ignore
76 # try to import modules separately (in dict order), and ignore
67 # failure, because these aren't available with early Mercurial
77 # failure, because these aren't available with early Mercurial
68 try:
78 try:
69 from mercurial import branchmap # since 2.5 (or bcee63733aad)
79 from mercurial import branchmap # since 2.5 (or bcee63733aad)
70 except ImportError:
80 except ImportError:
71 pass
81 pass
72 try:
82 try:
73 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
83 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
74 except ImportError:
84 except ImportError:
75 pass
85 pass
76 try:
86 try:
77 from mercurial import registrar # since 3.7 (or 37d50250b696)
87 from mercurial import registrar # since 3.7 (or 37d50250b696)
78 dir(registrar) # forcibly load it
88 dir(registrar) # forcibly load it
79 except ImportError:
89 except ImportError:
80 registrar = None
90 registrar = None
81 try:
91 try:
82 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
92 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
83 except ImportError:
93 except ImportError:
84 pass
94 pass
85 try:
95 try:
86 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
96 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
87 except ImportError:
97 except ImportError:
88 pass
98 pass
89 try:
99 try:
90 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
100 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
91 except ImportError:
101 except ImportError:
92 pass
102 pass
93
103
94
104
95 def identity(a):
105 def identity(a):
96 return a
106 return a
97
107
98 try:
108 try:
99 from mercurial import pycompat
109 from mercurial import pycompat
100 getargspec = pycompat.getargspec # added to module after 4.5
110 getargspec = pycompat.getargspec # added to module after 4.5
101 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
111 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
102 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
112 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
103 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
113 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
104 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
114 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
105 if pycompat.ispy3:
115 if pycompat.ispy3:
106 _maxint = sys.maxsize # per py3 docs for replacing maxint
116 _maxint = sys.maxsize # per py3 docs for replacing maxint
107 else:
117 else:
108 _maxint = sys.maxint
118 _maxint = sys.maxint
109 except (ImportError, AttributeError):
119 except (ImportError, AttributeError):
110 import inspect
120 import inspect
111 getargspec = inspect.getargspec
121 getargspec = inspect.getargspec
112 _byteskwargs = identity
122 _byteskwargs = identity
113 fsencode = identity # no py3 support
123 fsencode = identity # no py3 support
114 _maxint = sys.maxint # no py3 support
124 _maxint = sys.maxint # no py3 support
115 _sysstr = lambda x: x # no py3 support
125 _sysstr = lambda x: x # no py3 support
116 _xrange = xrange
126 _xrange = xrange
117
127
118 try:
128 try:
119 # 4.7+
129 # 4.7+
120 queue = pycompat.queue.Queue
130 queue = pycompat.queue.Queue
121 except (AttributeError, ImportError):
131 except (AttributeError, ImportError):
122 # <4.7.
132 # <4.7.
123 try:
133 try:
124 queue = pycompat.queue
134 queue = pycompat.queue
125 except (AttributeError, ImportError):
135 except (AttributeError, ImportError):
126 queue = util.queue
136 queue = util.queue
127
137
128 try:
138 try:
129 from mercurial import logcmdutil
139 from mercurial import logcmdutil
130 makelogtemplater = logcmdutil.maketemplater
140 makelogtemplater = logcmdutil.maketemplater
131 except (AttributeError, ImportError):
141 except (AttributeError, ImportError):
132 try:
142 try:
133 makelogtemplater = cmdutil.makelogtemplater
143 makelogtemplater = cmdutil.makelogtemplater
134 except (AttributeError, ImportError):
144 except (AttributeError, ImportError):
135 makelogtemplater = None
145 makelogtemplater = None
136
146
137 # for "historical portability":
147 # for "historical portability":
138 # define util.safehasattr forcibly, because util.safehasattr has been
148 # define util.safehasattr forcibly, because util.safehasattr has been
139 # available since 1.9.3 (or 94b200a11cf7)
149 # available since 1.9.3 (or 94b200a11cf7)
140 _undefined = object()
150 _undefined = object()
141 def safehasattr(thing, attr):
151 def safehasattr(thing, attr):
142 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
152 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
143 setattr(util, 'safehasattr', safehasattr)
153 setattr(util, 'safehasattr', safehasattr)
144
154
145 # for "historical portability":
155 # for "historical portability":
146 # define util.timer forcibly, because util.timer has been available
156 # define util.timer forcibly, because util.timer has been available
147 # since ae5d60bb70c9
157 # since ae5d60bb70c9
148 if safehasattr(time, 'perf_counter'):
158 if safehasattr(time, 'perf_counter'):
149 util.timer = time.perf_counter
159 util.timer = time.perf_counter
150 elif os.name == b'nt':
160 elif os.name == b'nt':
151 util.timer = time.clock
161 util.timer = time.clock
152 else:
162 else:
153 util.timer = time.time
163 util.timer = time.time
154
164
155 # for "historical portability":
165 # for "historical portability":
156 # use locally defined empty option list, if formatteropts isn't
166 # use locally defined empty option list, if formatteropts isn't
157 # available, because commands.formatteropts has been available since
167 # available, because commands.formatteropts has been available since
158 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
168 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
159 # available since 2.2 (or ae5f92e154d3)
169 # available since 2.2 (or ae5f92e154d3)
160 formatteropts = getattr(cmdutil, "formatteropts",
170 formatteropts = getattr(cmdutil, "formatteropts",
161 getattr(commands, "formatteropts", []))
171 getattr(commands, "formatteropts", []))
162
172
163 # for "historical portability":
173 # for "historical portability":
164 # use locally defined option list, if debugrevlogopts isn't available,
174 # use locally defined option list, if debugrevlogopts isn't available,
165 # because commands.debugrevlogopts has been available since 3.7 (or
175 # because commands.debugrevlogopts has been available since 3.7 (or
166 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
176 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
167 # since 1.9 (or a79fea6b3e77).
177 # since 1.9 (or a79fea6b3e77).
168 revlogopts = getattr(cmdutil, "debugrevlogopts",
178 revlogopts = getattr(cmdutil, "debugrevlogopts",
169 getattr(commands, "debugrevlogopts", [
179 getattr(commands, "debugrevlogopts", [
170 (b'c', b'changelog', False, (b'open changelog')),
180 (b'c', b'changelog', False, (b'open changelog')),
171 (b'm', b'manifest', False, (b'open manifest')),
181 (b'm', b'manifest', False, (b'open manifest')),
172 (b'', b'dir', False, (b'open directory manifest')),
182 (b'', b'dir', False, (b'open directory manifest')),
173 ]))
183 ]))
174
184
175 cmdtable = {}
185 cmdtable = {}
176
186
177 # for "historical portability":
187 # for "historical portability":
178 # define parsealiases locally, because cmdutil.parsealiases has been
188 # define parsealiases locally, because cmdutil.parsealiases has been
179 # available since 1.5 (or 6252852b4332)
189 # available since 1.5 (or 6252852b4332)
180 def parsealiases(cmd):
190 def parsealiases(cmd):
181 return cmd.split(b"|")
191 return cmd.split(b"|")
182
192
183 if safehasattr(registrar, 'command'):
193 if safehasattr(registrar, 'command'):
184 command = registrar.command(cmdtable)
194 command = registrar.command(cmdtable)
185 elif safehasattr(cmdutil, 'command'):
195 elif safehasattr(cmdutil, 'command'):
186 command = cmdutil.command(cmdtable)
196 command = cmdutil.command(cmdtable)
187 if b'norepo' not in getargspec(command).args:
197 if b'norepo' not in getargspec(command).args:
188 # for "historical portability":
198 # for "historical portability":
189 # wrap original cmdutil.command, because "norepo" option has
199 # wrap original cmdutil.command, because "norepo" option has
190 # been available since 3.1 (or 75a96326cecb)
200 # been available since 3.1 (or 75a96326cecb)
191 _command = command
201 _command = command
192 def command(name, options=(), synopsis=None, norepo=False):
202 def command(name, options=(), synopsis=None, norepo=False):
193 if norepo:
203 if norepo:
194 commands.norepo += b' %s' % b' '.join(parsealiases(name))
204 commands.norepo += b' %s' % b' '.join(parsealiases(name))
195 return _command(name, list(options), synopsis)
205 return _command(name, list(options), synopsis)
196 else:
206 else:
197 # for "historical portability":
207 # for "historical portability":
198 # define "@command" annotation locally, because cmdutil.command
208 # define "@command" annotation locally, because cmdutil.command
199 # has been available since 1.9 (or 2daa5179e73f)
209 # has been available since 1.9 (or 2daa5179e73f)
200 def command(name, options=(), synopsis=None, norepo=False):
210 def command(name, options=(), synopsis=None, norepo=False):
201 def decorator(func):
211 def decorator(func):
202 if synopsis:
212 if synopsis:
203 cmdtable[name] = func, list(options), synopsis
213 cmdtable[name] = func, list(options), synopsis
204 else:
214 else:
205 cmdtable[name] = func, list(options)
215 cmdtable[name] = func, list(options)
206 if norepo:
216 if norepo:
207 commands.norepo += b' %s' % b' '.join(parsealiases(name))
217 commands.norepo += b' %s' % b' '.join(parsealiases(name))
208 return func
218 return func
209 return decorator
219 return decorator
210
220
211 try:
221 try:
212 import mercurial.registrar
222 import mercurial.registrar
213 import mercurial.configitems
223 import mercurial.configitems
214 configtable = {}
224 configtable = {}
215 configitem = mercurial.registrar.configitem(configtable)
225 configitem = mercurial.registrar.configitem(configtable)
216 configitem(b'perf', b'presleep',
226 configitem(b'perf', b'presleep',
217 default=mercurial.configitems.dynamicdefault,
227 default=mercurial.configitems.dynamicdefault,
218 )
228 )
219 configitem(b'perf', b'stub',
229 configitem(b'perf', b'stub',
220 default=mercurial.configitems.dynamicdefault,
230 default=mercurial.configitems.dynamicdefault,
221 )
231 )
222 configitem(b'perf', b'parentscount',
232 configitem(b'perf', b'parentscount',
223 default=mercurial.configitems.dynamicdefault,
233 default=mercurial.configitems.dynamicdefault,
224 )
234 )
225 configitem(b'perf', b'all-timing',
235 configitem(b'perf', b'all-timing',
226 default=mercurial.configitems.dynamicdefault,
236 default=mercurial.configitems.dynamicdefault,
227 )
237 )
238 configitem(b'perf', b'run-limits',
239 default=mercurial.configitems.dynamicdefault,
240 )
228 except (ImportError, AttributeError):
241 except (ImportError, AttributeError):
229 pass
242 pass
230
243
231 def getlen(ui):
244 def getlen(ui):
232 if ui.configbool(b"perf", b"stub", False):
245 if ui.configbool(b"perf", b"stub", False):
233 return lambda x: 1
246 return lambda x: 1
234 return len
247 return len
235
248
236 def gettimer(ui, opts=None):
249 def gettimer(ui, opts=None):
237 """return a timer function and formatter: (timer, formatter)
250 """return a timer function and formatter: (timer, formatter)
238
251
239 This function exists to gather the creation of formatter in a single
252 This function exists to gather the creation of formatter in a single
240 place instead of duplicating it in all performance commands."""
253 place instead of duplicating it in all performance commands."""
241
254
242 # enforce an idle period before execution to counteract power management
255 # enforce an idle period before execution to counteract power management
243 # experimental config: perf.presleep
256 # experimental config: perf.presleep
244 time.sleep(getint(ui, b"perf", b"presleep", 1))
257 time.sleep(getint(ui, b"perf", b"presleep", 1))
245
258
246 if opts is None:
259 if opts is None:
247 opts = {}
260 opts = {}
248 # redirect all to stderr unless buffer api is in use
261 # redirect all to stderr unless buffer api is in use
249 if not ui._buffers:
262 if not ui._buffers:
250 ui = ui.copy()
263 ui = ui.copy()
251 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
264 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
252 if uifout:
265 if uifout:
253 # for "historical portability":
266 # for "historical portability":
254 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
267 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
255 uifout.set(ui.ferr)
268 uifout.set(ui.ferr)
256
269
257 # get a formatter
270 # get a formatter
258 uiformatter = getattr(ui, 'formatter', None)
271 uiformatter = getattr(ui, 'formatter', None)
259 if uiformatter:
272 if uiformatter:
260 fm = uiformatter(b'perf', opts)
273 fm = uiformatter(b'perf', opts)
261 else:
274 else:
262 # for "historical portability":
275 # for "historical portability":
263 # define formatter locally, because ui.formatter has been
276 # define formatter locally, because ui.formatter has been
264 # available since 2.2 (or ae5f92e154d3)
277 # available since 2.2 (or ae5f92e154d3)
265 from mercurial import node
278 from mercurial import node
266 class defaultformatter(object):
279 class defaultformatter(object):
267 """Minimized composition of baseformatter and plainformatter
280 """Minimized composition of baseformatter and plainformatter
268 """
281 """
269 def __init__(self, ui, topic, opts):
282 def __init__(self, ui, topic, opts):
270 self._ui = ui
283 self._ui = ui
271 if ui.debugflag:
284 if ui.debugflag:
272 self.hexfunc = node.hex
285 self.hexfunc = node.hex
273 else:
286 else:
274 self.hexfunc = node.short
287 self.hexfunc = node.short
275 def __nonzero__(self):
288 def __nonzero__(self):
276 return False
289 return False
277 __bool__ = __nonzero__
290 __bool__ = __nonzero__
278 def startitem(self):
291 def startitem(self):
279 pass
292 pass
280 def data(self, **data):
293 def data(self, **data):
281 pass
294 pass
282 def write(self, fields, deftext, *fielddata, **opts):
295 def write(self, fields, deftext, *fielddata, **opts):
283 self._ui.write(deftext % fielddata, **opts)
296 self._ui.write(deftext % fielddata, **opts)
284 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
297 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
285 if cond:
298 if cond:
286 self._ui.write(deftext % fielddata, **opts)
299 self._ui.write(deftext % fielddata, **opts)
287 def plain(self, text, **opts):
300 def plain(self, text, **opts):
288 self._ui.write(text, **opts)
301 self._ui.write(text, **opts)
289 def end(self):
302 def end(self):
290 pass
303 pass
291 fm = defaultformatter(ui, b'perf', opts)
304 fm = defaultformatter(ui, b'perf', opts)
292
305
293 # stub function, runs code only once instead of in a loop
306 # stub function, runs code only once instead of in a loop
294 # experimental config: perf.stub
307 # experimental config: perf.stub
295 if ui.configbool(b"perf", b"stub", False):
308 if ui.configbool(b"perf", b"stub", False):
296 return functools.partial(stub_timer, fm), fm
309 return functools.partial(stub_timer, fm), fm
297
310
298 # experimental config: perf.all-timing
311 # experimental config: perf.all-timing
299 displayall = ui.configbool(b"perf", b"all-timing", False)
312 displayall = ui.configbool(b"perf", b"all-timing", False)
300 return functools.partial(_timer, fm, displayall=displayall), fm
313
314 # experimental config: perf.run-limits
315 limitspec = ui.configlist(b"perf", b"run-limits", [])
316 limits = []
317 for item in limitspec:
318 parts = item.split(b'-', 1)
319 if len(parts) < 2:
320 ui.warn(('malformatted run limit entry, missing "-": %s\n'
321 % item))
322 continue
323 try:
324 time_limit = float(parts[0])
325 except ValueError as e:
326 ui.warn(('malformatted run limit entry, %s: %s\n'
327 % (e, item)))
328 continue
329 try:
330 run_limit = int(parts[1])
331 except ValueError as e:
332 ui.warn(('malformatted run limit entry, %s: %s\n'
333 % (e, item)))
334 continue
335 limits.append((time_limit, run_limit))
336 if not limits:
337 limits = DEFAULTLIMITS
338
339 t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
340 return t, fm
301
341
302 def stub_timer(fm, func, setup=None, title=None):
342 def stub_timer(fm, func, setup=None, title=None):
303 if setup is not None:
343 if setup is not None:
304 setup()
344 setup()
305 func()
345 func()
306
346
307 @contextlib.contextmanager
347 @contextlib.contextmanager
308 def timeone():
348 def timeone():
309 r = []
349 r = []
310 ostart = os.times()
350 ostart = os.times()
311 cstart = util.timer()
351 cstart = util.timer()
312 yield r
352 yield r
313 cstop = util.timer()
353 cstop = util.timer()
314 ostop = os.times()
354 ostop = os.times()
315 a, b = ostart, ostop
355 a, b = ostart, ostop
316 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
356 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
317
357
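As a usage illustration (not part of the patch), here is a self-contained variant of the timeone() helper above and how it is driven; the measured statement is an arbitrary placeholder workload.

# Illustrative, self-contained variant of timeone() and its usage.
import contextlib
import os
import time

@contextlib.contextmanager
def timeone_demo():
    r = []
    ostart = os.times()
    cstart = time.perf_counter()
    yield r
    cstop = time.perf_counter()
    ostop = os.times()
    # one (wall, user, sys) tuple per measured block
    r.append((cstop - cstart, ostop[0] - ostart[0], ostop[1] - ostart[1]))

with timeone_demo() as sample:
    sum(range(10 ** 6))      # arbitrary workload being measured
wall, user, system = sample[0]
print('wall %f user %f sys %f' % (wall, user, system))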
318
358
319 # list of stop condition (elapsed time, minimal run count)
359 # list of stop condition (elapsed time, minimal run count)
320 DEFAULTLIMITS = (
360 DEFAULTLIMITS = (
321 (3.0, 100),
361 (3.0, 100),
322 (10.0, 3),
362 (10.0, 3),
323 )
363 )
324
364
325 def _timer(fm, func, setup=None, title=None, displayall=False,
365 def _timer(fm, func, setup=None, title=None, displayall=False,
326 limits=DEFAULTLIMITS):
366 limits=DEFAULTLIMITS):
327 gc.collect()
367 gc.collect()
328 results = []
368 results = []
329 begin = util.timer()
369 begin = util.timer()
330 count = 0
370 count = 0
331 keepgoing = True
371 keepgoing = True
332 while keepgoing:
372 while keepgoing:
333 if setup is not None:
373 if setup is not None:
334 setup()
374 setup()
335 with timeone() as item:
375 with timeone() as item:
336 r = func()
376 r = func()
337 count += 1
377 count += 1
338 results.append(item[0])
378 results.append(item[0])
339 cstop = util.timer()
379 cstop = util.timer()
340 # Look for a stop condition.
380 # Look for a stop condition.
341 elapsed = cstop - begin
381 elapsed = cstop - begin
342 for t, mincount in limits:
382 for t, mincount in limits:
343 if elapsed >= t and count >= mincount:
383 if elapsed >= t and count >= mincount:
344 keepgoing = False
384 keepgoing = False
345 break
385 break
346
386
347 formatone(fm, results, title=title, result=r,
387 formatone(fm, results, title=title, result=r,
348 displayall=displayall)
388 displayall=displayall)
349
389
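To make the run-limit logic of _timer() above concrete, here is a standalone sketch of the stop condition (illustrative only; names are placeholders). With DEFAULTLIMITS, a fast benchmark keeps going until 3.0 seconds have elapsed and 100 runs are done, while a slow one is cut off once 10.0 seconds have elapsed and at least 3 runs have completed.

# Illustrative sketch of the stop condition evaluated after each run above.
LIMITS = ((3.0, 100), (10.0, 3))       # same values as DEFAULTLIMITS

def should_stop(elapsed, count, limits=LIMITS):
    return any(elapsed >= t and count >= mincount for t, mincount in limits)

print(should_stop(2.0, 500))    # False: 100+ runs, but under 3.0 seconds
print(should_stop(3.5, 500))    # True: the (3.0, 100) limit is satisfied
print(should_stop(9.0, 2))      # False: slow benchmark, keep running
print(should_stop(12.0, 3))     # True: the (10.0, 3) limit is satisfied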
350 def formatone(fm, timings, title=None, result=None, displayall=False):
390 def formatone(fm, timings, title=None, result=None, displayall=False):
351
391
352 count = len(timings)
392 count = len(timings)
353
393
354 fm.startitem()
394 fm.startitem()
355
395
356 if title:
396 if title:
357 fm.write(b'title', b'! %s\n', title)
397 fm.write(b'title', b'! %s\n', title)
358 if result:
398 if result:
359 fm.write(b'result', b'! result: %s\n', result)
399 fm.write(b'result', b'! result: %s\n', result)
360 def display(role, entry):
400 def display(role, entry):
361 prefix = b''
401 prefix = b''
362 if role != b'best':
402 if role != b'best':
363 prefix = b'%s.' % role
403 prefix = b'%s.' % role
364 fm.plain(b'!')
404 fm.plain(b'!')
365 fm.write(prefix + b'wall', b' wall %f', entry[0])
405 fm.write(prefix + b'wall', b' wall %f', entry[0])
366 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
406 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
367 fm.write(prefix + b'user', b' user %f', entry[1])
407 fm.write(prefix + b'user', b' user %f', entry[1])
368 fm.write(prefix + b'sys', b' sys %f', entry[2])
408 fm.write(prefix + b'sys', b' sys %f', entry[2])
369 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
409 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
370 fm.plain(b'\n')
410 fm.plain(b'\n')
371 timings.sort()
411 timings.sort()
372 min_val = timings[0]
412 min_val = timings[0]
373 display(b'best', min_val)
413 display(b'best', min_val)
374 if displayall:
414 if displayall:
375 max_val = timings[-1]
415 max_val = timings[-1]
376 display(b'max', max_val)
416 display(b'max', max_val)
377 avg = tuple([sum(x) / count for x in zip(*timings)])
417 avg = tuple([sum(x) / count for x in zip(*timings)])
378 display(b'avg', avg)
418 display(b'avg', avg)
379 median = timings[len(timings) // 2]
419 median = timings[len(timings) // 2]
380 display(b'median', median)
420 display(b'median', median)
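The best/max/avg/median lines printed when `all-timing` is enabled (see the module docstring) are derived from the per-run (wall, user, sys) tuples as in this standalone sketch; the sample numbers are made up.

# Illustrative sketch of the all-timing statistics computed by formatone().
# Each tuple is (wall, user, sys); the values below are made-up samples.
timings = [(0.30, 0.25, 0.02), (0.10, 0.08, 0.01), (0.20, 0.15, 0.02)]
timings.sort()
count = len(timings)
best = timings[0]
worst = timings[-1]
avg = tuple(sum(col) / count for col in zip(*timings))   # element-wise mean
median = timings[count // 2]
print(best, worst, avg, median)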
381
421
382 # utilities for historical portability
422 # utilities for historical portability
383
423
384 def getint(ui, section, name, default):
424 def getint(ui, section, name, default):
385 # for "historical portability":
425 # for "historical portability":
386 # ui.configint has been available since 1.9 (or fa2b596db182)
426 # ui.configint has been available since 1.9 (or fa2b596db182)
387 v = ui.config(section, name, None)
427 v = ui.config(section, name, None)
388 if v is None:
428 if v is None:
389 return default
429 return default
390 try:
430 try:
391 return int(v)
431 return int(v)
392 except ValueError:
432 except ValueError:
393 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
433 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
394 % (section, name, v))
434 % (section, name, v))
395
435
396 def safeattrsetter(obj, name, ignoremissing=False):
436 def safeattrsetter(obj, name, ignoremissing=False):
397 """Ensure that 'obj' has 'name' attribute before subsequent setattr
437 """Ensure that 'obj' has 'name' attribute before subsequent setattr
398
438
399 This function aborts if 'obj' doesn't have the 'name' attribute at
439 This function aborts if 'obj' doesn't have the 'name' attribute at
400 runtime. This avoids overlooking removal of an attribute, which would
440 runtime. This avoids overlooking removal of an attribute, which would
401 break the assumptions of the performance measurement in the future.
441 break the assumptions of the performance measurement in the future.
402
442
403 This function returns the object to (1) assign a new value, and
443 This function returns the object to (1) assign a new value, and
404 (2) restore an original value to the attribute.
444 (2) restore an original value to the attribute.
405
445
406 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
446 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
407 an abort, and this function returns None. This is useful to
447 an abort, and this function returns None. This is useful to
408 examine an attribute which isn't guaranteed to exist in all Mercurial
448 examine an attribute which isn't guaranteed to exist in all Mercurial
409 versions.
449 versions.
410 """
450 """
411 if not util.safehasattr(obj, name):
451 if not util.safehasattr(obj, name):
412 if ignoremissing:
452 if ignoremissing:
413 return None
453 return None
414 raise error.Abort((b"missing attribute %s of %s might break assumption"
454 raise error.Abort((b"missing attribute %s of %s might break assumption"
415 b" of performance measurement") % (name, obj))
455 b" of performance measurement") % (name, obj))
416
456
417 origvalue = getattr(obj, _sysstr(name))
457 origvalue = getattr(obj, _sysstr(name))
418 class attrutil(object):
458 class attrutil(object):
419 def set(self, newvalue):
459 def set(self, newvalue):
420 setattr(obj, _sysstr(name), newvalue)
460 setattr(obj, _sysstr(name), newvalue)
421 def restore(self):
461 def restore(self):
422 setattr(obj, _sysstr(name), origvalue)
462 setattr(obj, _sysstr(name), origvalue)
423
463
424 return attrutil()
464 return attrutil()
425
465
426 # utilities to examine each internal API changes
466 # utilities to examine each internal API changes
427
467
428 def getbranchmapsubsettable():
468 def getbranchmapsubsettable():
429 # for "historical portability":
469 # for "historical portability":
430 # subsettable is defined in:
470 # subsettable is defined in:
431 # - branchmap since 2.9 (or 175c6fd8cacc)
471 # - branchmap since 2.9 (or 175c6fd8cacc)
432 # - repoview since 2.5 (or 59a9f18d4587)
472 # - repoview since 2.5 (or 59a9f18d4587)
433 for mod in (branchmap, repoview):
473 for mod in (branchmap, repoview):
434 subsettable = getattr(mod, 'subsettable', None)
474 subsettable = getattr(mod, 'subsettable', None)
435 if subsettable:
475 if subsettable:
436 return subsettable
476 return subsettable
437
477
438 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
478 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
439 # branchmap and repoview modules exist, but subsettable attribute
479 # branchmap and repoview modules exist, but subsettable attribute
440 # doesn't)
480 # doesn't)
441 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
481 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
442 hint=b"use 2.5 or later")
482 hint=b"use 2.5 or later")
443
483
444 def getsvfs(repo):
484 def getsvfs(repo):
445 """Return appropriate object to access files under .hg/store
485 """Return appropriate object to access files under .hg/store
446 """
486 """
447 # for "historical portability":
487 # for "historical portability":
448 # repo.svfs has been available since 2.3 (or 7034365089bf)
488 # repo.svfs has been available since 2.3 (or 7034365089bf)
449 svfs = getattr(repo, 'svfs', None)
489 svfs = getattr(repo, 'svfs', None)
450 if svfs:
490 if svfs:
451 return svfs
491 return svfs
452 else:
492 else:
453 return getattr(repo, 'sopener')
493 return getattr(repo, 'sopener')
454
494
455 def getvfs(repo):
495 def getvfs(repo):
456 """Return appropriate object to access files under .hg
496 """Return appropriate object to access files under .hg
457 """
497 """
458 # for "historical portability":
498 # for "historical portability":
459 # repo.vfs has been available since 2.3 (or 7034365089bf)
499 # repo.vfs has been available since 2.3 (or 7034365089bf)
460 vfs = getattr(repo, 'vfs', None)
500 vfs = getattr(repo, 'vfs', None)
461 if vfs:
501 if vfs:
462 return vfs
502 return vfs
463 else:
503 else:
464 return getattr(repo, 'opener')
504 return getattr(repo, 'opener')
465
505
466 def repocleartagscachefunc(repo):
506 def repocleartagscachefunc(repo):
467 """Return the function to clear tags cache according to repo internal API
507 """Return the function to clear tags cache according to repo internal API
468 """
508 """
469 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
509 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
470 # in this case, setattr(repo, '_tagscache', None) or so isn't
510 # in this case, setattr(repo, '_tagscache', None) or so isn't
471 # the correct way to clear the tags cache, because existing code paths
511 # the correct way to clear the tags cache, because existing code paths
472 # expect _tagscache to be a structured object.
512 # expect _tagscache to be a structured object.
473 def clearcache():
513 def clearcache():
474 # _tagscache has been filteredpropertycache since 2.5 (or
514 # _tagscache has been filteredpropertycache since 2.5 (or
475 # 98c867ac1330), and delattr() can't work in such case
515 # 98c867ac1330), and delattr() can't work in such case
476 if b'_tagscache' in vars(repo):
516 if b'_tagscache' in vars(repo):
477 del repo.__dict__[b'_tagscache']
517 del repo.__dict__[b'_tagscache']
478 return clearcache
518 return clearcache
479
519
480 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
520 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
481 if repotags: # since 1.4 (or 5614a628d173)
521 if repotags: # since 1.4 (or 5614a628d173)
482 return lambda : repotags.set(None)
522 return lambda : repotags.set(None)
483
523
484 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
524 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
485 if repotagscache: # since 0.6 (or d7df759d0e97)
525 if repotagscache: # since 0.6 (or d7df759d0e97)
486 return lambda : repotagscache.set(None)
526 return lambda : repotagscache.set(None)
487
527
488 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
528 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
489 # this point, but it isn't so problematic, because:
529 # this point, but it isn't so problematic, because:
490 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
530 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
491 # in perftags() causes failure soon
531 # in perftags() causes failure soon
492 # - perf.py itself has been available since 1.1 (or eb240755386d)
532 # - perf.py itself has been available since 1.1 (or eb240755386d)
493 raise error.Abort((b"tags API of this hg command is unknown"))
533 raise error.Abort((b"tags API of this hg command is unknown"))
494
534
495 # utilities to clear cache
535 # utilities to clear cache
496
536
497 def clearfilecache(obj, attrname):
537 def clearfilecache(obj, attrname):
498 unfiltered = getattr(obj, 'unfiltered', None)
538 unfiltered = getattr(obj, 'unfiltered', None)
499 if unfiltered is not None:
539 if unfiltered is not None:
500 obj = obj.unfiltered()
540 obj = obj.unfiltered()
501 if attrname in vars(obj):
541 if attrname in vars(obj):
502 delattr(obj, attrname)
542 delattr(obj, attrname)
503 obj._filecache.pop(attrname, None)
543 obj._filecache.pop(attrname, None)
504
544
505 def clearchangelog(repo):
545 def clearchangelog(repo):
506 if repo is not repo.unfiltered():
546 if repo is not repo.unfiltered():
507 object.__setattr__(repo, r'_clcachekey', None)
547 object.__setattr__(repo, r'_clcachekey', None)
508 object.__setattr__(repo, r'_clcache', None)
548 object.__setattr__(repo, r'_clcache', None)
509 clearfilecache(repo.unfiltered(), 'changelog')
549 clearfilecache(repo.unfiltered(), 'changelog')
510
550
511 # perf commands
551 # perf commands
512
552
513 @command(b'perfwalk', formatteropts)
553 @command(b'perfwalk', formatteropts)
514 def perfwalk(ui, repo, *pats, **opts):
554 def perfwalk(ui, repo, *pats, **opts):
515 opts = _byteskwargs(opts)
555 opts = _byteskwargs(opts)
516 timer, fm = gettimer(ui, opts)
556 timer, fm = gettimer(ui, opts)
517 m = scmutil.match(repo[None], pats, {})
557 m = scmutil.match(repo[None], pats, {})
518 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
558 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
519 ignored=False))))
559 ignored=False))))
520 fm.end()
560 fm.end()
521
561
522 @command(b'perfannotate', formatteropts)
562 @command(b'perfannotate', formatteropts)
523 def perfannotate(ui, repo, f, **opts):
563 def perfannotate(ui, repo, f, **opts):
524 opts = _byteskwargs(opts)
564 opts = _byteskwargs(opts)
525 timer, fm = gettimer(ui, opts)
565 timer, fm = gettimer(ui, opts)
526 fc = repo[b'.'][f]
566 fc = repo[b'.'][f]
527 timer(lambda: len(fc.annotate(True)))
567 timer(lambda: len(fc.annotate(True)))
528 fm.end()
568 fm.end()
529
569
530 @command(b'perfstatus',
570 @command(b'perfstatus',
531 [(b'u', b'unknown', False,
571 [(b'u', b'unknown', False,
532 b'ask status to look for unknown files')] + formatteropts)
572 b'ask status to look for unknown files')] + formatteropts)
533 def perfstatus(ui, repo, **opts):
573 def perfstatus(ui, repo, **opts):
534 opts = _byteskwargs(opts)
574 opts = _byteskwargs(opts)
535 #m = match.always(repo.root, repo.getcwd())
575 #m = match.always(repo.root, repo.getcwd())
536 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
576 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
537 # False))))
577 # False))))
538 timer, fm = gettimer(ui, opts)
578 timer, fm = gettimer(ui, opts)
539 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
579 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
540 fm.end()
580 fm.end()
541
581
542 @command(b'perfaddremove', formatteropts)
582 @command(b'perfaddremove', formatteropts)
543 def perfaddremove(ui, repo, **opts):
583 def perfaddremove(ui, repo, **opts):
544 opts = _byteskwargs(opts)
584 opts = _byteskwargs(opts)
545 timer, fm = gettimer(ui, opts)
585 timer, fm = gettimer(ui, opts)
546 try:
586 try:
547 oldquiet = repo.ui.quiet
587 oldquiet = repo.ui.quiet
548 repo.ui.quiet = True
588 repo.ui.quiet = True
549 matcher = scmutil.match(repo[None])
589 matcher = scmutil.match(repo[None])
550 opts[b'dry_run'] = True
590 opts[b'dry_run'] = True
551 if b'uipathfn' in getargspec(scmutil.addremove).args:
591 if b'uipathfn' in getargspec(scmutil.addremove).args:
552 uipathfn = scmutil.getuipathfn(repo)
592 uipathfn = scmutil.getuipathfn(repo)
553 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
593 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
554 else:
594 else:
555 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
595 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
556 finally:
596 finally:
557 repo.ui.quiet = oldquiet
597 repo.ui.quiet = oldquiet
558 fm.end()
598 fm.end()
559
599
560 def clearcaches(cl):
600 def clearcaches(cl):
561 # behave somewhat consistently across internal API changes
601 # behave somewhat consistently across internal API changes
562 if util.safehasattr(cl, b'clearcaches'):
602 if util.safehasattr(cl, b'clearcaches'):
563 cl.clearcaches()
603 cl.clearcaches()
564 elif util.safehasattr(cl, b'_nodecache'):
604 elif util.safehasattr(cl, b'_nodecache'):
565 from mercurial.node import nullid, nullrev
605 from mercurial.node import nullid, nullrev
566 cl._nodecache = {nullid: nullrev}
606 cl._nodecache = {nullid: nullrev}
567 cl._nodepos = None
607 cl._nodepos = None
568
608
569 @command(b'perfheads', formatteropts)
609 @command(b'perfheads', formatteropts)
570 def perfheads(ui, repo, **opts):
610 def perfheads(ui, repo, **opts):
571 """benchmark the computation of a changelog heads"""
611 """benchmark the computation of a changelog heads"""
572 opts = _byteskwargs(opts)
612 opts = _byteskwargs(opts)
573 timer, fm = gettimer(ui, opts)
613 timer, fm = gettimer(ui, opts)
574 cl = repo.changelog
614 cl = repo.changelog
575 def s():
615 def s():
576 clearcaches(cl)
616 clearcaches(cl)
577 def d():
617 def d():
578 len(cl.headrevs())
618 len(cl.headrevs())
579 timer(d, setup=s)
619 timer(d, setup=s)
580 fm.end()
620 fm.end()
581
621
582 @command(b'perftags', formatteropts+
622 @command(b'perftags', formatteropts+
583 [
623 [
584 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
624 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
585 ])
625 ])
586 def perftags(ui, repo, **opts):
626 def perftags(ui, repo, **opts):
587 opts = _byteskwargs(opts)
627 opts = _byteskwargs(opts)
588 timer, fm = gettimer(ui, opts)
628 timer, fm = gettimer(ui, opts)
589 repocleartagscache = repocleartagscachefunc(repo)
629 repocleartagscache = repocleartagscachefunc(repo)
590 clearrevlogs = opts[b'clear_revlogs']
630 clearrevlogs = opts[b'clear_revlogs']
591 def s():
631 def s():
592 if clearrevlogs:
632 if clearrevlogs:
593 clearchangelog(repo)
633 clearchangelog(repo)
594 clearfilecache(repo.unfiltered(), 'manifest')
634 clearfilecache(repo.unfiltered(), 'manifest')
595 repocleartagscache()
635 repocleartagscache()
596 def t():
636 def t():
597 return len(repo.tags())
637 return len(repo.tags())
598 timer(t, setup=s)
638 timer(t, setup=s)
599 fm.end()
639 fm.end()
600
640
601 @command(b'perfancestors', formatteropts)
641 @command(b'perfancestors', formatteropts)
602 def perfancestors(ui, repo, **opts):
642 def perfancestors(ui, repo, **opts):
603 opts = _byteskwargs(opts)
643 opts = _byteskwargs(opts)
604 timer, fm = gettimer(ui, opts)
644 timer, fm = gettimer(ui, opts)
605 heads = repo.changelog.headrevs()
645 heads = repo.changelog.headrevs()
606 def d():
646 def d():
607 for a in repo.changelog.ancestors(heads):
647 for a in repo.changelog.ancestors(heads):
608 pass
648 pass
609 timer(d)
649 timer(d)
610 fm.end()
650 fm.end()
611
651
612 @command(b'perfancestorset', formatteropts)
652 @command(b'perfancestorset', formatteropts)
613 def perfancestorset(ui, repo, revset, **opts):
653 def perfancestorset(ui, repo, revset, **opts):
614 opts = _byteskwargs(opts)
654 opts = _byteskwargs(opts)
615 timer, fm = gettimer(ui, opts)
655 timer, fm = gettimer(ui, opts)
616 revs = repo.revs(revset)
656 revs = repo.revs(revset)
617 heads = repo.changelog.headrevs()
657 heads = repo.changelog.headrevs()
618 def d():
658 def d():
619 s = repo.changelog.ancestors(heads)
659 s = repo.changelog.ancestors(heads)
620 for rev in revs:
660 for rev in revs:
621 rev in s
661 rev in s
622 timer(d)
662 timer(d)
623 fm.end()
663 fm.end()
624
664
625 @command(b'perfdiscovery', formatteropts, b'PATH')
665 @command(b'perfdiscovery', formatteropts, b'PATH')
626 def perfdiscovery(ui, repo, path, **opts):
666 def perfdiscovery(ui, repo, path, **opts):
627 """benchmark discovery between local repo and the peer at given path
667 """benchmark discovery between local repo and the peer at given path
628 """
668 """
629 repos = [repo, None]
669 repos = [repo, None]
630 timer, fm = gettimer(ui, opts)
670 timer, fm = gettimer(ui, opts)
631 path = ui.expandpath(path)
671 path = ui.expandpath(path)
632
672
633 def s():
673 def s():
634 repos[1] = hg.peer(ui, opts, path)
674 repos[1] = hg.peer(ui, opts, path)
635 def d():
675 def d():
636 setdiscovery.findcommonheads(ui, *repos)
676 setdiscovery.findcommonheads(ui, *repos)
637 timer(d, setup=s)
677 timer(d, setup=s)
638 fm.end()
678 fm.end()
639
679
640 @command(b'perfbookmarks', formatteropts +
680 @command(b'perfbookmarks', formatteropts +
641 [
681 [
642 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
682 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
643 ])
683 ])
644 def perfbookmarks(ui, repo, **opts):
684 def perfbookmarks(ui, repo, **opts):
645 """benchmark parsing bookmarks from disk to memory"""
685 """benchmark parsing bookmarks from disk to memory"""
646 opts = _byteskwargs(opts)
686 opts = _byteskwargs(opts)
647 timer, fm = gettimer(ui, opts)
687 timer, fm = gettimer(ui, opts)
648
688
649 clearrevlogs = opts[b'clear_revlogs']
689 clearrevlogs = opts[b'clear_revlogs']
650 def s():
690 def s():
651 if clearrevlogs:
691 if clearrevlogs:
652 clearchangelog(repo)
692 clearchangelog(repo)
653 clearfilecache(repo, b'_bookmarks')
693 clearfilecache(repo, b'_bookmarks')
654 def d():
694 def d():
655 repo._bookmarks
695 repo._bookmarks
656 timer(d, setup=s)
696 timer(d, setup=s)
657 fm.end()
697 fm.end()
658
698
659 @command(b'perfbundleread', formatteropts, b'BUNDLE')
699 @command(b'perfbundleread', formatteropts, b'BUNDLE')
660 def perfbundleread(ui, repo, bundlepath, **opts):
700 def perfbundleread(ui, repo, bundlepath, **opts):
661 """Benchmark reading of bundle files.
701 """Benchmark reading of bundle files.
662
702
663 This command is meant to isolate the I/O part of bundle reading as
703 This command is meant to isolate the I/O part of bundle reading as
664 much as possible.
704 much as possible.
665 """
705 """
666 from mercurial import (
706 from mercurial import (
667 bundle2,
707 bundle2,
668 exchange,
708 exchange,
669 streamclone,
709 streamclone,
670 )
710 )
671
711
672 opts = _byteskwargs(opts)
712 opts = _byteskwargs(opts)
673
713
674 def makebench(fn):
714 def makebench(fn):
675 def run():
715 def run():
676 with open(bundlepath, b'rb') as fh:
716 with open(bundlepath, b'rb') as fh:
677 bundle = exchange.readbundle(ui, fh, bundlepath)
717 bundle = exchange.readbundle(ui, fh, bundlepath)
678 fn(bundle)
718 fn(bundle)
679
719
680 return run
720 return run
681
721
682 def makereadnbytes(size):
722 def makereadnbytes(size):
683 def run():
723 def run():
684 with open(bundlepath, b'rb') as fh:
724 with open(bundlepath, b'rb') as fh:
685 bundle = exchange.readbundle(ui, fh, bundlepath)
725 bundle = exchange.readbundle(ui, fh, bundlepath)
686 while bundle.read(size):
726 while bundle.read(size):
687 pass
727 pass
688
728
689 return run
729 return run
690
730
691 def makestdioread(size):
731 def makestdioread(size):
692 def run():
732 def run():
693 with open(bundlepath, b'rb') as fh:
733 with open(bundlepath, b'rb') as fh:
694 while fh.read(size):
734 while fh.read(size):
695 pass
735 pass
696
736
697 return run
737 return run
698
738
699 # bundle1
739 # bundle1
700
740
701 def deltaiter(bundle):
741 def deltaiter(bundle):
702 for delta in bundle.deltaiter():
742 for delta in bundle.deltaiter():
703 pass
743 pass
704
744
705 def iterchunks(bundle):
745 def iterchunks(bundle):
706 for chunk in bundle.getchunks():
746 for chunk in bundle.getchunks():
707 pass
747 pass
708
748
709 # bundle2
749 # bundle2
710
750
711 def forwardchunks(bundle):
751 def forwardchunks(bundle):
712 for chunk in bundle._forwardchunks():
752 for chunk in bundle._forwardchunks():
713 pass
753 pass
714
754
715 def iterparts(bundle):
755 def iterparts(bundle):
716 for part in bundle.iterparts():
756 for part in bundle.iterparts():
717 pass
757 pass
718
758
719 def iterpartsseekable(bundle):
759 def iterpartsseekable(bundle):
720 for part in bundle.iterparts(seekable=True):
760 for part in bundle.iterparts(seekable=True):
721 pass
761 pass
722
762
723 def seek(bundle):
763 def seek(bundle):
724 for part in bundle.iterparts(seekable=True):
764 for part in bundle.iterparts(seekable=True):
725 part.seek(0, os.SEEK_END)
765 part.seek(0, os.SEEK_END)
726
766
727 def makepartreadnbytes(size):
767 def makepartreadnbytes(size):
728 def run():
768 def run():
729 with open(bundlepath, b'rb') as fh:
769 with open(bundlepath, b'rb') as fh:
730 bundle = exchange.readbundle(ui, fh, bundlepath)
770 bundle = exchange.readbundle(ui, fh, bundlepath)
731 for part in bundle.iterparts():
771 for part in bundle.iterparts():
732 while part.read(size):
772 while part.read(size):
733 pass
773 pass
734
774
735 return run
775 return run
736
776
737 benches = [
777 benches = [
738 (makestdioread(8192), b'read(8k)'),
778 (makestdioread(8192), b'read(8k)'),
739 (makestdioread(16384), b'read(16k)'),
779 (makestdioread(16384), b'read(16k)'),
740 (makestdioread(32768), b'read(32k)'),
780 (makestdioread(32768), b'read(32k)'),
741 (makestdioread(131072), b'read(128k)'),
781 (makestdioread(131072), b'read(128k)'),
742 ]
782 ]
743
783
744 with open(bundlepath, b'rb') as fh:
784 with open(bundlepath, b'rb') as fh:
745 bundle = exchange.readbundle(ui, fh, bundlepath)
785 bundle = exchange.readbundle(ui, fh, bundlepath)
746
786
747 if isinstance(bundle, changegroup.cg1unpacker):
787 if isinstance(bundle, changegroup.cg1unpacker):
748 benches.extend([
788 benches.extend([
749 (makebench(deltaiter), b'cg1 deltaiter()'),
789 (makebench(deltaiter), b'cg1 deltaiter()'),
750 (makebench(iterchunks), b'cg1 getchunks()'),
790 (makebench(iterchunks), b'cg1 getchunks()'),
751 (makereadnbytes(8192), b'cg1 read(8k)'),
791 (makereadnbytes(8192), b'cg1 read(8k)'),
752 (makereadnbytes(16384), b'cg1 read(16k)'),
792 (makereadnbytes(16384), b'cg1 read(16k)'),
753 (makereadnbytes(32768), b'cg1 read(32k)'),
793 (makereadnbytes(32768), b'cg1 read(32k)'),
754 (makereadnbytes(131072), b'cg1 read(128k)'),
794 (makereadnbytes(131072), b'cg1 read(128k)'),
755 ])
795 ])
756 elif isinstance(bundle, bundle2.unbundle20):
796 elif isinstance(bundle, bundle2.unbundle20):
757 benches.extend([
797 benches.extend([
758 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
798 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
759 (makebench(iterparts), b'bundle2 iterparts()'),
799 (makebench(iterparts), b'bundle2 iterparts()'),
760 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
800 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
761 (makebench(seek), b'bundle2 part seek()'),
801 (makebench(seek), b'bundle2 part seek()'),
762 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
802 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
763 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
803 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
764 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
804 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
765 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
805 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
766 ])
806 ])
767 elif isinstance(bundle, streamclone.streamcloneapplier):
807 elif isinstance(bundle, streamclone.streamcloneapplier):
768 raise error.Abort(b'stream clone bundles not supported')
808 raise error.Abort(b'stream clone bundles not supported')
769 else:
809 else:
770 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
810 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
771
811
772 for fn, title in benches:
812 for fn, title in benches:
773 timer, fm = gettimer(ui, opts)
813 timer, fm = gettimer(ui, opts)
774 timer(fn, title=title)
814 timer(fn, title=title)
775 fm.end()
815 fm.end()
776
816
777 @command(b'perfchangegroupchangelog', formatteropts +
817 @command(b'perfchangegroupchangelog', formatteropts +
778 [(b'', b'cgversion', b'02', b'changegroup version'),
818 [(b'', b'cgversion', b'02', b'changegroup version'),
779 (b'r', b'rev', b'', b'revisions to add to changegroup')])
819 (b'r', b'rev', b'', b'revisions to add to changegroup')])
780 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
820 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
781 """Benchmark producing a changelog group for a changegroup.
821 """Benchmark producing a changelog group for a changegroup.
782
822
783 This measures the time spent processing the changelog during a
823 This measures the time spent processing the changelog during a
784 bundle operation. This occurs during `hg bundle` and on a server
824 bundle operation. This occurs during `hg bundle` and on a server
785 processing a `getbundle` wire protocol request (handles clones
825 processing a `getbundle` wire protocol request (handles clones
786 and pull requests).
826 and pull requests).
787
827
788 By default, all revisions are added to the changegroup.
828 By default, all revisions are added to the changegroup.
789 """
829 """
790 opts = _byteskwargs(opts)
830 opts = _byteskwargs(opts)
791 cl = repo.changelog
831 cl = repo.changelog
792 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
832 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
793 bundler = changegroup.getbundler(cgversion, repo)
833 bundler = changegroup.getbundler(cgversion, repo)
794
834
795 def d():
835 def d():
796 state, chunks = bundler._generatechangelog(cl, nodes)
836 state, chunks = bundler._generatechangelog(cl, nodes)
797 for chunk in chunks:
837 for chunk in chunks:
798 pass
838 pass
799
839
800 timer, fm = gettimer(ui, opts)
840 timer, fm = gettimer(ui, opts)
801
841
802 # Terminal printing can interfere with timing. So disable it.
842 # Terminal printing can interfere with timing. So disable it.
803 with ui.configoverride({(b'progress', b'disable'): True}):
843 with ui.configoverride({(b'progress', b'disable'): True}):
804 timer(d)
844 timer(d)
805
845
806 fm.end()
846 fm.end()
807
847
808 @command(b'perfdirs', formatteropts)
848 @command(b'perfdirs', formatteropts)
809 def perfdirs(ui, repo, **opts):
849 def perfdirs(ui, repo, **opts):
810 opts = _byteskwargs(opts)
850 opts = _byteskwargs(opts)
811 timer, fm = gettimer(ui, opts)
851 timer, fm = gettimer(ui, opts)
812 dirstate = repo.dirstate
852 dirstate = repo.dirstate
813 b'a' in dirstate
853 b'a' in dirstate
814 def d():
854 def d():
815 dirstate.hasdir(b'a')
855 dirstate.hasdir(b'a')
816 del dirstate._map._dirs
856 del dirstate._map._dirs
817 timer(d)
857 timer(d)
818 fm.end()
858 fm.end()
819
859
820 @command(b'perfdirstate', formatteropts)
860 @command(b'perfdirstate', formatteropts)
821 def perfdirstate(ui, repo, **opts):
861 def perfdirstate(ui, repo, **opts):
822 opts = _byteskwargs(opts)
862 opts = _byteskwargs(opts)
823 timer, fm = gettimer(ui, opts)
863 timer, fm = gettimer(ui, opts)
824 b"a" in repo.dirstate
864 b"a" in repo.dirstate
825 def d():
865 def d():
826 repo.dirstate.invalidate()
866 repo.dirstate.invalidate()
827 b"a" in repo.dirstate
867 b"a" in repo.dirstate
828 timer(d)
868 timer(d)
829 fm.end()
869 fm.end()
830
870
831 @command(b'perfdirstatedirs', formatteropts)
871 @command(b'perfdirstatedirs', formatteropts)
832 def perfdirstatedirs(ui, repo, **opts):
872 def perfdirstatedirs(ui, repo, **opts):
833 opts = _byteskwargs(opts)
873 opts = _byteskwargs(opts)
834 timer, fm = gettimer(ui, opts)
874 timer, fm = gettimer(ui, opts)
835 b"a" in repo.dirstate
875 b"a" in repo.dirstate
836 def d():
876 def d():
837 repo.dirstate.hasdir(b"a")
877 repo.dirstate.hasdir(b"a")
838 del repo.dirstate._map._dirs
878 del repo.dirstate._map._dirs
839 timer(d)
879 timer(d)
840 fm.end()
880 fm.end()
841
881
842 @command(b'perfdirstatefoldmap', formatteropts)
882 @command(b'perfdirstatefoldmap', formatteropts)
843 def perfdirstatefoldmap(ui, repo, **opts):
883 def perfdirstatefoldmap(ui, repo, **opts):
844 opts = _byteskwargs(opts)
884 opts = _byteskwargs(opts)
845 timer, fm = gettimer(ui, opts)
885 timer, fm = gettimer(ui, opts)
846 dirstate = repo.dirstate
886 dirstate = repo.dirstate
847 b'a' in dirstate
887 b'a' in dirstate
848 def d():
888 def d():
849 dirstate._map.filefoldmap.get(b'a')
889 dirstate._map.filefoldmap.get(b'a')
850 del dirstate._map.filefoldmap
890 del dirstate._map.filefoldmap
851 timer(d)
891 timer(d)
852 fm.end()
892 fm.end()
853
893
854 @command(b'perfdirfoldmap', formatteropts)
894 @command(b'perfdirfoldmap', formatteropts)
855 def perfdirfoldmap(ui, repo, **opts):
895 def perfdirfoldmap(ui, repo, **opts):
856 opts = _byteskwargs(opts)
896 opts = _byteskwargs(opts)
857 timer, fm = gettimer(ui, opts)
897 timer, fm = gettimer(ui, opts)
858 dirstate = repo.dirstate
898 dirstate = repo.dirstate
859 b'a' in dirstate
899 b'a' in dirstate
860 def d():
900 def d():
861 dirstate._map.dirfoldmap.get(b'a')
901 dirstate._map.dirfoldmap.get(b'a')
862 del dirstate._map.dirfoldmap
902 del dirstate._map.dirfoldmap
863 del dirstate._map._dirs
903 del dirstate._map._dirs
864 timer(d)
904 timer(d)
865 fm.end()
905 fm.end()
866
906
867 @command(b'perfdirstatewrite', formatteropts)
907 @command(b'perfdirstatewrite', formatteropts)
868 def perfdirstatewrite(ui, repo, **opts):
908 def perfdirstatewrite(ui, repo, **opts):
869 opts = _byteskwargs(opts)
909 opts = _byteskwargs(opts)
870 timer, fm = gettimer(ui, opts)
910 timer, fm = gettimer(ui, opts)
871 ds = repo.dirstate
911 ds = repo.dirstate
872 b"a" in ds
912 b"a" in ds
873 def d():
913 def d():
874 ds._dirty = True
914 ds._dirty = True
875 ds.write(repo.currenttransaction())
915 ds.write(repo.currenttransaction())
876 timer(d)
916 timer(d)
877 fm.end()
917 fm.end()
878
918
879 @command(b'perfmergecalculate',
919 @command(b'perfmergecalculate',
880 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
920 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
881 def perfmergecalculate(ui, repo, rev, **opts):
921 def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

@command(b'perfmanifest', [
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
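    # Example invocations (illustrative; assume the extension is enabled).
    # With `-m` the argument is interpreted as a manifest revision or full
    # node instead of a changeset:
    #   hg perfmanifest tip
    #   hg perfmanifest --clear-disk tip
    #   hg perfmanifest -m 0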
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()

@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than
    for `--rev 0`. The number of looked up revisions and their order can
    also matter.

    Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
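    # Example invocations (illustrative; assume the extension is enabled):
    #   hg perfindex --rev tip
    #   hg perfindex --rev '-10000:' --rev '0'
    #   hg perfindex --no-lookup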
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()

@command(b'perfnodemap', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the amount and order of revisions we
    look up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
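    # Example invocations (illustrative; assume the extension is enabled;
    # --rev is required):
    #   hg perfnodemap --rev tip
    #   hg perfnodemap --rev '-10000:' --rev '0'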
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()

@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
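    # Spawn `hg version -q` in a subprocess with HGRCPATH cleared, so the
    # timing covers interpreter and Mercurial start-up cost rather than any
    # real command work.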
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object
    layers from the repository object. The first N revisions are used for
    this benchmark. N is controlled by the ``perf.parentscount`` config
    option (default: 1000).
    """
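    # Example invocation (illustrative; assumes the extension is enabled and
    # the repository has at least perf.parentscount commits):
    #   hg perfparents --config perf.parentscount=100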
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
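    # Example invocations (illustrative; assume the extension is enabled):
    #   hg perftemplating
    #   hg perftemplating -r 'last(all(), 1000)' '{rev}:{node|short}\n'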
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfpathcopies`

    This command finds source-destination pairs relevant for copy tracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However, it should give a good
    approximation of which revision pairs are very costly.
    """
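    # Example invocation (illustrative; assumes the extension is enabled):
    #   hg perfhelper-pathcopies --revs 'last(all(), 1000)' --timing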
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()

@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
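    # Example invocations (illustrative; assume the extension is enabled and
    # the revlog has enough revisions):
    #   hg perfbdiff -c 1000
    #   hg perfbdiff --alldata 1000 --count 10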
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """
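    # Example invocations (illustrative; assume the extension is enabled):
    #   hg perfrevlogindex -c    # changelog index
    #   hg perfrevlogindex -m    # manifest index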

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
1816 opts = _byteskwargs(opts)
1856 opts = _byteskwargs(opts)
1817
1857
1818 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1858 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1819 rllen = getlen(ui)(rl)
1859 rllen = getlen(ui)(rl)
1820
1860
1821 if startrev < 0:
1861 if startrev < 0:
1822 startrev = rllen + startrev
1862 startrev = rllen + startrev
1823
1863
1824 def d():
1864 def d():
1825 rl.clearcaches()
1865 rl.clearcaches()
1826
1866
1827 beginrev = startrev
1867 beginrev = startrev
1828 endrev = rllen
1868 endrev = rllen
1829 dist = opts[b'dist']
1869 dist = opts[b'dist']
1830
1870
1831 if reverse:
1871 if reverse:
1832 beginrev, endrev = endrev - 1, beginrev - 1
1872 beginrev, endrev = endrev - 1, beginrev - 1
1833 dist = -1 * dist
1873 dist = -1 * dist
1834
1874
1835 for x in _xrange(beginrev, endrev, dist):
1875 for x in _xrange(beginrev, endrev, dist):
1836 # Old Mercurial versions don't support passing an int here.
1876 # Old Mercurial versions don't support passing an int here.
1837 n = rl.node(x)
1877 n = rl.node(x)
1838 rl.revision(n)
1878 rl.revision(n)
1839
1879
1840 timer, fm = gettimer(ui, opts)
1880 timer, fm = gettimer(ui, opts)
1841 timer(d)
1881 timer(d)
1842 fm.end()
1882 fm.end()
1843
1883
1844 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1884 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1845 [(b's', b'startrev', 1000, b'revision to start writing at'),
1885 [(b's', b'startrev', 1000, b'revision to start writing at'),
1846 (b'', b'stoprev', -1, b'last revision to write'),
1886 (b'', b'stoprev', -1, b'last revision to write'),
1847 (b'', b'count', 3, b'number of passes to perform'),
1887 (b'', b'count', 3, b'number of passes to perform'),
1848 (b'', b'details', False, b'print timing for every revision tested'),
1888 (b'', b'details', False, b'print timing for every revision tested'),
1849 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1889 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1850 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1890 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1851 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1891 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1852 ],
1892 ],
1853 b'-c|-m|FILE')
1893 b'-c|-m|FILE')
1854 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1894 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1855 """Benchmark writing a series of revisions to a revlog.
1895 """Benchmark writing a series of revisions to a revlog.
1856
1896
1857 Possible source values are:
1897 Possible source values are:
1858 * `full`: add from a full text (default).
1898 * `full`: add from a full text (default).
1859 * `parent-1`: add from a delta to the first parent
1899 * `parent-1`: add from a delta to the first parent
1860 * `parent-2`: add from a delta to the second parent if it exists
1900 * `parent-2`: add from a delta to the second parent if it exists
1861 (use a delta from the first parent otherwise)
1901 (use a delta from the first parent otherwise)
1862 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1902 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1863 * `storage`: add from the existing precomputed deltas
1903 * `storage`: add from the existing precomputed deltas
1864 """
1904 """
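    # Illustrative usage (added as an example, not part of the original
    # source): time how fast the changelog accepts revisions when fed deltas
    # against the smaller parent:
    #   $ hg perfrevlogwrite -c --source parent-smallest --count 3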
1865 opts = _byteskwargs(opts)
1905 opts = _byteskwargs(opts)
1866
1906
1867 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1907 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1868 rllen = getlen(ui)(rl)
1908 rllen = getlen(ui)(rl)
1869 if startrev < 0:
1909 if startrev < 0:
1870 startrev = rllen + startrev
1910 startrev = rllen + startrev
1871 if stoprev < 0:
1911 if stoprev < 0:
1872 stoprev = rllen + stoprev
1912 stoprev = rllen + stoprev
1873
1913
1874 lazydeltabase = opts['lazydeltabase']
1914 lazydeltabase = opts['lazydeltabase']
1875 source = opts['source']
1915 source = opts['source']
1876 clearcaches = opts['clear_caches']
1916 clearcaches = opts['clear_caches']
1877 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1917 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1878 b'storage')
1918 b'storage')
1879 if source not in validsource:
1919 if source not in validsource:
1880 raise error.Abort('invalid source type: %s' % source)
1920 raise error.Abort('invalid source type: %s' % source)
1881
1921
1882 ### actually gather results
1922 ### actually gather results
1883 count = opts['count']
1923 count = opts['count']
1884 if count <= 0:
1924 if count <= 0:
1885 raise error.Abort('invalid run count: %d' % count)
1925 raise error.Abort('invalid run count: %d' % count)
1886 allresults = []
1926 allresults = []
1887 for c in range(count):
1927 for c in range(count):
1888 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1928 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1889 lazydeltabase=lazydeltabase,
1929 lazydeltabase=lazydeltabase,
1890 clearcaches=clearcaches)
1930 clearcaches=clearcaches)
1891 allresults.append(timing)
1931 allresults.append(timing)
1892
1932
1893 ### consolidate the results in a single list
1933 ### consolidate the results in a single list
1894 results = []
1934 results = []
1895 for idx, (rev, t) in enumerate(allresults[0]):
1935 for idx, (rev, t) in enumerate(allresults[0]):
1896 ts = [t]
1936 ts = [t]
1897 for other in allresults[1:]:
1937 for other in allresults[1:]:
1898 orev, ot = other[idx]
1938 orev, ot = other[idx]
1899 assert orev == rev
1939 assert orev == rev
1900 ts.append(ot)
1940 ts.append(ot)
1901 results.append((rev, ts))
1941 results.append((rev, ts))
1902 resultcount = len(results)
1942 resultcount = len(results)
1903
1943
1904 ### Compute and display relevant statistics
1944 ### Compute and display relevant statistics
1905
1945
1906 # get a formatter
1946 # get a formatter
1907 fm = ui.formatter(b'perf', opts)
1947 fm = ui.formatter(b'perf', opts)
1908 displayall = ui.configbool(b"perf", b"all-timing", False)
1948 displayall = ui.configbool(b"perf", b"all-timing", False)
1909
1949
1910 # print individual details if requested
1950 # print individual details if requested
1911 if opts['details']:
1951 if opts['details']:
1912 for idx, item in enumerate(results, 1):
1952 for idx, item in enumerate(results, 1):
1913 rev, data = item
1953 rev, data = item
1914 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1954 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1915 formatone(fm, data, title=title, displayall=displayall)
1955 formatone(fm, data, title=title, displayall=displayall)
1916
1956
1917 # sorts results by median time
1957 # sorts results by median time
1918 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1958 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1919 # list of (name, index) to display
1959 # list of (name, index) to display
1920 relevants = [
1960 relevants = [
1921 ("min", 0),
1961 ("min", 0),
1922 ("10%", resultcount * 10 // 100),
1962 ("10%", resultcount * 10 // 100),
1923 ("25%", resultcount * 25 // 100),
1963 ("25%", resultcount * 25 // 100),
1924 ("50%", resultcount * 70 // 100),
1964 ("50%", resultcount * 70 // 100),
1925 ("75%", resultcount * 75 // 100),
1965 ("75%", resultcount * 75 // 100),
1926 ("90%", resultcount * 90 // 100),
1966 ("90%", resultcount * 90 // 100),
1927 ("95%", resultcount * 95 // 100),
1967 ("95%", resultcount * 95 // 100),
1928 ("99%", resultcount * 99 // 100),
1968 ("99%", resultcount * 99 // 100),
1929 ("99.9%", resultcount * 999 // 1000),
1969 ("99.9%", resultcount * 999 // 1000),
1930 ("99.99%", resultcount * 9999 // 10000),
1970 ("99.99%", resultcount * 9999 // 10000),
1931 ("99.999%", resultcount * 99999 // 100000),
1971 ("99.999%", resultcount * 99999 // 100000),
1932 ("max", -1),
1972 ("max", -1),
1933 ]
1973 ]
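    # Clarifying note (added): each percentile name above maps to an index into
    # the median-sorted `results` list, e.g. with resultcount == 200 the "90%"
    # entry reads results[200 * 90 // 100] == results[180].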
1934 if not ui.quiet:
1974 if not ui.quiet:
1935 for name, idx in relevants:
1975 for name, idx in relevants:
1936 data = results[idx]
1976 data = results[idx]
1937 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1977 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1938 formatone(fm, data[1], title=title, displayall=displayall)
1978 formatone(fm, data[1], title=title, displayall=displayall)
1939
1979
1940 # XXX summing that many floats will not be very precise, we ignore this fact
1980 # XXX summing that many floats will not be very precise, we ignore this fact
1941 # for now
1981 # for now
1942 totaltime = []
1982 totaltime = []
1943 for item in allresults:
1983 for item in allresults:
1944 totaltime.append((sum(x[1][0] for x in item),
1984 totaltime.append((sum(x[1][0] for x in item),
1945 sum(x[1][1] for x in item),
1985 sum(x[1][1] for x in item),
1946 sum(x[1][2] for x in item),)
1986 sum(x[1][2] for x in item),)
1947 )
1987 )
1948 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1988 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1949 displayall=displayall)
1989 displayall=displayall)
1950 fm.end()
1990 fm.end()
1951
1991
1952 class _faketr(object):
1992 class _faketr(object):
1953 def add(s, x, y, z=None):
1993 def add(s, x, y, z=None):
1954 return None
1994 return None
1955
1995
1956 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1996 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1957 lazydeltabase=True, clearcaches=True):
1997 lazydeltabase=True, clearcaches=True):
1958 timings = []
1998 timings = []
1959 tr = _faketr()
1999 tr = _faketr()
1960 with _temprevlog(ui, orig, startrev) as dest:
2000 with _temprevlog(ui, orig, startrev) as dest:
1961 dest._lazydeltabase = lazydeltabase
2001 dest._lazydeltabase = lazydeltabase
1962 revs = list(orig.revs(startrev, stoprev))
2002 revs = list(orig.revs(startrev, stoprev))
1963 total = len(revs)
2003 total = len(revs)
1964 topic = 'adding'
2004 topic = 'adding'
1965 if runidx is not None:
2005 if runidx is not None:
1966 topic += ' (run #%d)' % runidx
2006 topic += ' (run #%d)' % runidx
1967 # Support both old and new progress API
2007 # Support both old and new progress API
1968 if util.safehasattr(ui, 'makeprogress'):
2008 if util.safehasattr(ui, 'makeprogress'):
1969 progress = ui.makeprogress(topic, unit='revs', total=total)
2009 progress = ui.makeprogress(topic, unit='revs', total=total)
1970 def updateprogress(pos):
2010 def updateprogress(pos):
1971 progress.update(pos)
2011 progress.update(pos)
1972 def completeprogress():
2012 def completeprogress():
1973 progress.complete()
2013 progress.complete()
1974 else:
2014 else:
1975 def updateprogress(pos):
2015 def updateprogress(pos):
1976 ui.progress(topic, pos, unit='revs', total=total)
2016 ui.progress(topic, pos, unit='revs', total=total)
1977 def completeprogress():
2017 def completeprogress():
1978 ui.progress(topic, None, unit='revs', total=total)
2018 ui.progress(topic, None, unit='revs', total=total)
1979
2019
1980 for idx, rev in enumerate(revs):
2020 for idx, rev in enumerate(revs):
1981 updateprogress(idx)
2021 updateprogress(idx)
1982 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2022 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1983 if clearcaches:
2023 if clearcaches:
1984 dest.index.clearcaches()
2024 dest.index.clearcaches()
1985 dest.clearcaches()
2025 dest.clearcaches()
1986 with timeone() as r:
2026 with timeone() as r:
1987 dest.addrawrevision(*addargs, **addkwargs)
2027 dest.addrawrevision(*addargs, **addkwargs)
1988 timings.append((rev, r[0]))
2028 timings.append((rev, r[0]))
1989 updateprogress(total)
2029 updateprogress(total)
1990 completeprogress()
2030 completeprogress()
1991 return timings
2031 return timings
1992
2032
1993 def _getrevisionseed(orig, rev, tr, source):
2033 def _getrevisionseed(orig, rev, tr, source):
1994 from mercurial.node import nullid
2034 from mercurial.node import nullid
1995
2035
1996 linkrev = orig.linkrev(rev)
2036 linkrev = orig.linkrev(rev)
1997 node = orig.node(rev)
2037 node = orig.node(rev)
1998 p1, p2 = orig.parents(node)
2038 p1, p2 = orig.parents(node)
1999 flags = orig.flags(rev)
2039 flags = orig.flags(rev)
2000 cachedelta = None
2040 cachedelta = None
2001 text = None
2041 text = None
2002
2042
2003 if source == b'full':
2043 if source == b'full':
2004 text = orig.revision(rev)
2044 text = orig.revision(rev)
2005 elif source == b'parent-1':
2045 elif source == b'parent-1':
2006 baserev = orig.rev(p1)
2046 baserev = orig.rev(p1)
2007 cachedelta = (baserev, orig.revdiff(p1, rev))
2047 cachedelta = (baserev, orig.revdiff(p1, rev))
2008 elif source == b'parent-2':
2048 elif source == b'parent-2':
2009 parent = p2
2049 parent = p2
2010 if p2 == nullid:
2050 if p2 == nullid:
2011 parent = p1
2051 parent = p1
2012 baserev = orig.rev(parent)
2052 baserev = orig.rev(parent)
2013 cachedelta = (baserev, orig.revdiff(parent, rev))
2053 cachedelta = (baserev, orig.revdiff(parent, rev))
2014 elif source == b'parent-smallest':
2054 elif source == b'parent-smallest':
2015 p1diff = orig.revdiff(p1, rev)
2055 p1diff = orig.revdiff(p1, rev)
2016 parent = p1
2056 parent = p1
2017 diff = p1diff
2057 diff = p1diff
2018 if p2 != nullid:
2058 if p2 != nullid:
2019 p2diff = orig.revdiff(p2, rev)
2059 p2diff = orig.revdiff(p2, rev)
2020 if len(p1diff) > len(p2diff):
2060 if len(p1diff) > len(p2diff):
2021 parent = p2
2061 parent = p2
2022 diff = p2diff
2062 diff = p2diff
2023 baserev = orig.rev(parent)
2063 baserev = orig.rev(parent)
2024 cachedelta = (baserev, diff)
2064 cachedelta = (baserev, diff)
2025 elif source == b'storage':
2065 elif source == b'storage':
2026 baserev = orig.deltaparent(rev)
2066 baserev = orig.deltaparent(rev)
2027 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2067 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2028
2068
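    # Descriptive note (added): this tuple is unpacked by _timeonewrite() into
    # dest.addrawrevision(*addargs, **addkwargs), so the positional part must
    # stay ordered as (text, tr, linkrev, p1, p2) while node, flags and
    # cachedelta travel as keyword arguments.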
2029 return ((text, tr, linkrev, p1, p2),
2069 return ((text, tr, linkrev, p1, p2),
2030 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2070 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2031
2071
2032 @contextlib.contextmanager
2072 @contextlib.contextmanager
2033 def _temprevlog(ui, orig, truncaterev):
2073 def _temprevlog(ui, orig, truncaterev):
2034 from mercurial import vfs as vfsmod
2074 from mercurial import vfs as vfsmod
2035
2075
2036 if orig._inline:
2076 if orig._inline:
2037 raise error.Abort('not supporting inline revlog (yet)')
2077 raise error.Abort('not supporting inline revlog (yet)')
2038
2078
2039 origindexpath = orig.opener.join(orig.indexfile)
2079 origindexpath = orig.opener.join(orig.indexfile)
2040 origdatapath = orig.opener.join(orig.datafile)
2080 origdatapath = orig.opener.join(orig.datafile)
2041 indexname = 'revlog.i'
2081 indexname = 'revlog.i'
2042 dataname = 'revlog.d'
2082 dataname = 'revlog.d'
2043
2083
2044 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2084 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2045 try:
2085 try:
2046 # copy the data file in a temporary directory
2086 # copy the data file in a temporary directory
2047 ui.debug('copying data into %s\n' % tmpdir)
2087 ui.debug('copying data into %s\n' % tmpdir)
2048 destindexpath = os.path.join(tmpdir, 'revlog.i')
2088 destindexpath = os.path.join(tmpdir, 'revlog.i')
2049 destdatapath = os.path.join(tmpdir, 'revlog.d')
2089 destdatapath = os.path.join(tmpdir, 'revlog.d')
2050 shutil.copyfile(origindexpath, destindexpath)
2090 shutil.copyfile(origindexpath, destindexpath)
2051 shutil.copyfile(origdatapath, destdatapath)
2091 shutil.copyfile(origdatapath, destdatapath)
2052
2092
2053 # remove the data we want to add again
2093 # remove the data we want to add again
2054 ui.debug('truncating data to be rewritten\n')
2094 ui.debug('truncating data to be rewritten\n')
2055 with open(destindexpath, 'ab') as index:
2095 with open(destindexpath, 'ab') as index:
2056 index.seek(0)
2096 index.seek(0)
2057 index.truncate(truncaterev * orig._io.size)
2097 index.truncate(truncaterev * orig._io.size)
2058 with open(destdatapath, 'ab') as data:
2098 with open(destdatapath, 'ab') as data:
2059 data.seek(0)
2099 data.seek(0)
2060 data.truncate(orig.start(truncaterev))
2100 data.truncate(orig.start(truncaterev))
2061
2101
2062 # instantiate a new revlog from the temporary copy
2102 # instantiate a new revlog from the temporary copy
2063 ui.debug('instantiating revlog from the truncated copy\n')
2103 ui.debug('instantiating revlog from the truncated copy\n')
2064 vfs = vfsmod.vfs(tmpdir)
2104 vfs = vfsmod.vfs(tmpdir)
2065 vfs.options = getattr(orig.opener, 'options', None)
2105 vfs.options = getattr(orig.opener, 'options', None)
2066
2106
2067 dest = revlog.revlog(vfs,
2107 dest = revlog.revlog(vfs,
2068 indexfile=indexname,
2108 indexfile=indexname,
2069 datafile=dataname)
2109 datafile=dataname)
2070 if dest._inline:
2110 if dest._inline:
2071 raise error.Abort('not supporting inline revlog (yet)')
2111 raise error.Abort('not supporting inline revlog (yet)')
2072 # make sure internals are initialized
2112 # make sure internals are initialized
2073 dest.revision(len(dest) - 1)
2113 dest.revision(len(dest) - 1)
2074 yield dest
2114 yield dest
2075 del dest, vfs
2115 del dest, vfs
2076 finally:
2116 finally:
2077 shutil.rmtree(tmpdir, True)
2117 shutil.rmtree(tmpdir, True)
2078
2118
2079 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2119 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2080 [(b'e', b'engines', b'', b'compression engines to use'),
2120 [(b'e', b'engines', b'', b'compression engines to use'),
2081 (b's', b'startrev', 0, b'revision to start at')],
2121 (b's', b'startrev', 0, b'revision to start at')],
2082 b'-c|-m|FILE')
2122 b'-c|-m|FILE')
2083 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2123 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2084 """Benchmark operations on revlog chunks.
2124 """Benchmark operations on revlog chunks.
2085
2125
2086 Logically, each revlog is a collection of fulltext revisions. However,
2126 Logically, each revlog is a collection of fulltext revisions. However,
2087 stored within each revlog are "chunks" of possibly compressed data. This
2127 stored within each revlog are "chunks" of possibly compressed data. This
2088 data needs to be read and decompressed or compressed and written.
2128 data needs to be read and decompressed or compressed and written.
2089
2129
2090 This command measures the time it takes to read+decompress and recompress
2130 This command measures the time it takes to read+decompress and recompress
2091 chunks in a revlog. It effectively isolates I/O and compression performance.
2131 chunks in a revlog. It effectively isolates I/O and compression performance.
2092 For measurements of higher-level operations like resolving revisions,
2132 For measurements of higher-level operations like resolving revisions,
2093 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2133 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2094 """
2134 """
2095 opts = _byteskwargs(opts)
2135 opts = _byteskwargs(opts)
2096
2136
2097 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2137 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2098
2138
2099 # _chunkraw was renamed to _getsegmentforrevs.
2139 # _chunkraw was renamed to _getsegmentforrevs.
2100 try:
2140 try:
2101 segmentforrevs = rl._getsegmentforrevs
2141 segmentforrevs = rl._getsegmentforrevs
2102 except AttributeError:
2142 except AttributeError:
2103 segmentforrevs = rl._chunkraw
2143 segmentforrevs = rl._chunkraw
2104
2144
2105 # Verify engines argument.
2145 # Verify engines argument.
2106 if engines:
2146 if engines:
2107 engines = set(e.strip() for e in engines.split(b','))
2147 engines = set(e.strip() for e in engines.split(b','))
2108 for engine in engines:
2148 for engine in engines:
2109 try:
2149 try:
2110 util.compressionengines[engine]
2150 util.compressionengines[engine]
2111 except KeyError:
2151 except KeyError:
2112 raise error.Abort(b'unknown compression engine: %s' % engine)
2152 raise error.Abort(b'unknown compression engine: %s' % engine)
2113 else:
2153 else:
2114 engines = []
2154 engines = []
2115 for e in util.compengines:
2155 for e in util.compengines:
2116 engine = util.compengines[e]
2156 engine = util.compengines[e]
2117 try:
2157 try:
2118 if engine.available():
2158 if engine.available():
2119 engine.revlogcompressor().compress(b'dummy')
2159 engine.revlogcompressor().compress(b'dummy')
2120 engines.append(e)
2160 engines.append(e)
2121 except NotImplementedError:
2161 except NotImplementedError:
2122 pass
2162 pass
2123
2163
2124 revs = list(rl.revs(startrev, len(rl) - 1))
2164 revs = list(rl.revs(startrev, len(rl) - 1))
2125
2165
2126 def rlfh(rl):
2166 def rlfh(rl):
2127 if rl._inline:
2167 if rl._inline:
2128 return getsvfs(repo)(rl.indexfile)
2168 return getsvfs(repo)(rl.indexfile)
2129 else:
2169 else:
2130 return getsvfs(repo)(rl.datafile)
2170 return getsvfs(repo)(rl.datafile)
2131
2171
2132 def doread():
2172 def doread():
2133 rl.clearcaches()
2173 rl.clearcaches()
2134 for rev in revs:
2174 for rev in revs:
2135 segmentforrevs(rev, rev)
2175 segmentforrevs(rev, rev)
2136
2176
2137 def doreadcachedfh():
2177 def doreadcachedfh():
2138 rl.clearcaches()
2178 rl.clearcaches()
2139 fh = rlfh(rl)
2179 fh = rlfh(rl)
2140 for rev in revs:
2180 for rev in revs:
2141 segmentforrevs(rev, rev, df=fh)
2181 segmentforrevs(rev, rev, df=fh)
2142
2182
2143 def doreadbatch():
2183 def doreadbatch():
2144 rl.clearcaches()
2184 rl.clearcaches()
2145 segmentforrevs(revs[0], revs[-1])
2185 segmentforrevs(revs[0], revs[-1])
2146
2186
2147 def doreadbatchcachedfh():
2187 def doreadbatchcachedfh():
2148 rl.clearcaches()
2188 rl.clearcaches()
2149 fh = rlfh(rl)
2189 fh = rlfh(rl)
2150 segmentforrevs(revs[0], revs[-1], df=fh)
2190 segmentforrevs(revs[0], revs[-1], df=fh)
2151
2191
2152 def dochunk():
2192 def dochunk():
2153 rl.clearcaches()
2193 rl.clearcaches()
2154 fh = rlfh(rl)
2194 fh = rlfh(rl)
2155 for rev in revs:
2195 for rev in revs:
2156 rl._chunk(rev, df=fh)
2196 rl._chunk(rev, df=fh)
2157
2197
2158 chunks = [None]
2198 chunks = [None]
2159
2199
2160 def dochunkbatch():
2200 def dochunkbatch():
2161 rl.clearcaches()
2201 rl.clearcaches()
2162 fh = rlfh(rl)
2202 fh = rlfh(rl)
2163 # Save chunks as a side-effect.
2203 # Save chunks as a side-effect.
2164 chunks[0] = rl._chunks(revs, df=fh)
2204 chunks[0] = rl._chunks(revs, df=fh)
2165
2205
2166 def docompress(compressor):
2206 def docompress(compressor):
2167 rl.clearcaches()
2207 rl.clearcaches()
2168
2208
2169 try:
2209 try:
2170 # Swap in the requested compression engine.
2210 # Swap in the requested compression engine.
2171 oldcompressor = rl._compressor
2211 oldcompressor = rl._compressor
2172 rl._compressor = compressor
2212 rl._compressor = compressor
2173 for chunk in chunks[0]:
2213 for chunk in chunks[0]:
2174 rl.compress(chunk)
2214 rl.compress(chunk)
2175 finally:
2215 finally:
2176 rl._compressor = oldcompressor
2216 rl._compressor = oldcompressor
2177
2217
2178 benches = [
2218 benches = [
2179 (lambda: doread(), b'read'),
2219 (lambda: doread(), b'read'),
2180 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2220 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2181 (lambda: doreadbatch(), b'read batch'),
2221 (lambda: doreadbatch(), b'read batch'),
2182 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2222 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2183 (lambda: dochunk(), b'chunk'),
2223 (lambda: dochunk(), b'chunk'),
2184 (lambda: dochunkbatch(), b'chunk batch'),
2224 (lambda: dochunkbatch(), b'chunk batch'),
2185 ]
2225 ]
2186
2226
2187 for engine in sorted(engines):
2227 for engine in sorted(engines):
2188 compressor = util.compengines[engine].revlogcompressor()
2228 compressor = util.compengines[engine].revlogcompressor()
2189 benches.append((functools.partial(docompress, compressor),
2229 benches.append((functools.partial(docompress, compressor),
2190 b'compress w/ %s' % engine))
2230 b'compress w/ %s' % engine))
2191
2231
2192 for fn, title in benches:
2232 for fn, title in benches:
2193 timer, fm = gettimer(ui, opts)
2233 timer, fm = gettimer(ui, opts)
2194 timer(fn, title=title)
2234 timer(fn, title=title)
2195 fm.end()
2235 fm.end()
2196
2236
2197 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2237 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2198 [(b'', b'cache', False, b'use caches instead of clearing')],
2238 [(b'', b'cache', False, b'use caches instead of clearing')],
2199 b'-c|-m|FILE REV')
2239 b'-c|-m|FILE REV')
2200 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2240 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2201 """Benchmark obtaining a revlog revision.
2241 """Benchmark obtaining a revlog revision.
2202
2242
2203 Obtaining a revlog revision consists of roughly the following steps:
2243 Obtaining a revlog revision consists of roughly the following steps:
2204
2244
2205 1. Compute the delta chain
2245 1. Compute the delta chain
2206 2. Slice the delta chain if applicable
2246 2. Slice the delta chain if applicable
2207 3. Obtain the raw chunks for that delta chain
2247 3. Obtain the raw chunks for that delta chain
2208 4. Decompress each raw chunk
2248 4. Decompress each raw chunk
2209 5. Apply binary patches to obtain fulltext
2249 5. Apply binary patches to obtain fulltext
2210 6. Verify hash of fulltext
2250 6. Verify hash of fulltext
2211
2251
2212 This command measures the time spent in each of these phases.
2252 This command measures the time spent in each of these phases.
2213 """
2253 """
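    # Illustrative usage (added as an example, not part of the original
    # source): break down the cost of reconstructing the manifest tip revision
    # phase by phase:
    #   $ hg perfrevlogrevision -m tip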
2214 opts = _byteskwargs(opts)
2254 opts = _byteskwargs(opts)
2215
2255
2216 if opts.get(b'changelog') or opts.get(b'manifest'):
2256 if opts.get(b'changelog') or opts.get(b'manifest'):
2217 file_, rev = None, file_
2257 file_, rev = None, file_
2218 elif rev is None:
2258 elif rev is None:
2219 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2259 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2220
2260
2221 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2261 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2222
2262
2223 # _chunkraw was renamed to _getsegmentforrevs.
2263 # _chunkraw was renamed to _getsegmentforrevs.
2224 try:
2264 try:
2225 segmentforrevs = r._getsegmentforrevs
2265 segmentforrevs = r._getsegmentforrevs
2226 except AttributeError:
2266 except AttributeError:
2227 segmentforrevs = r._chunkraw
2267 segmentforrevs = r._chunkraw
2228
2268
2229 node = r.lookup(rev)
2269 node = r.lookup(rev)
2230 rev = r.rev(node)
2270 rev = r.rev(node)
2231
2271
2232 def getrawchunks(data, chain):
2272 def getrawchunks(data, chain):
2233 start = r.start
2273 start = r.start
2234 length = r.length
2274 length = r.length
2235 inline = r._inline
2275 inline = r._inline
2236 iosize = r._io.size
2276 iosize = r._io.size
2237 buffer = util.buffer
2277 buffer = util.buffer
2238
2278
2239 chunks = []
2279 chunks = []
2240 ladd = chunks.append
2280 ladd = chunks.append
2241 for idx, item in enumerate(chain):
2281 for idx, item in enumerate(chain):
2242 offset = start(item[0])
2282 offset = start(item[0])
2243 bits = data[idx]
2283 bits = data[idx]
2244 for rev in item:
2284 for rev in item:
2245 chunkstart = start(rev)
2285 chunkstart = start(rev)
2246 if inline:
2286 if inline:
2247 chunkstart += (rev + 1) * iosize
2287 chunkstart += (rev + 1) * iosize
2248 chunklength = length(rev)
2288 chunklength = length(rev)
2249 ladd(buffer(bits, chunkstart - offset, chunklength))
2289 ladd(buffer(bits, chunkstart - offset, chunklength))
2250
2290
2251 return chunks
2291 return chunks
2252
2292
2253 def dodeltachain(rev):
2293 def dodeltachain(rev):
2254 if not cache:
2294 if not cache:
2255 r.clearcaches()
2295 r.clearcaches()
2256 r._deltachain(rev)
2296 r._deltachain(rev)
2257
2297
2258 def doread(chain):
2298 def doread(chain):
2259 if not cache:
2299 if not cache:
2260 r.clearcaches()
2300 r.clearcaches()
2261 for item in slicedchain:
2301 for item in slicedchain:
2262 segmentforrevs(item[0], item[-1])
2302 segmentforrevs(item[0], item[-1])
2263
2303
2264 def doslice(r, chain, size):
2304 def doslice(r, chain, size):
2265 for s in slicechunk(r, chain, targetsize=size):
2305 for s in slicechunk(r, chain, targetsize=size):
2266 pass
2306 pass
2267
2307
2268 def dorawchunks(data, chain):
2308 def dorawchunks(data, chain):
2269 if not cache:
2309 if not cache:
2270 r.clearcaches()
2310 r.clearcaches()
2271 getrawchunks(data, chain)
2311 getrawchunks(data, chain)
2272
2312
2273 def dodecompress(chunks):
2313 def dodecompress(chunks):
2274 decomp = r.decompress
2314 decomp = r.decompress
2275 for chunk in chunks:
2315 for chunk in chunks:
2276 decomp(chunk)
2316 decomp(chunk)
2277
2317
2278 def dopatch(text, bins):
2318 def dopatch(text, bins):
2279 if not cache:
2319 if not cache:
2280 r.clearcaches()
2320 r.clearcaches()
2281 mdiff.patches(text, bins)
2321 mdiff.patches(text, bins)
2282
2322
2283 def dohash(text):
2323 def dohash(text):
2284 if not cache:
2324 if not cache:
2285 r.clearcaches()
2325 r.clearcaches()
2286 r.checkhash(text, node, rev=rev)
2326 r.checkhash(text, node, rev=rev)
2287
2327
2288 def dorevision():
2328 def dorevision():
2289 if not cache:
2329 if not cache:
2290 r.clearcaches()
2330 r.clearcaches()
2291 r.revision(node)
2331 r.revision(node)
2292
2332
2293 try:
2333 try:
2294 from mercurial.revlogutils.deltas import slicechunk
2334 from mercurial.revlogutils.deltas import slicechunk
2295 except ImportError:
2335 except ImportError:
2296 slicechunk = getattr(revlog, '_slicechunk', None)
2336 slicechunk = getattr(revlog, '_slicechunk', None)
2297
2337
2298 size = r.length(rev)
2338 size = r.length(rev)
2299 chain = r._deltachain(rev)[0]
2339 chain = r._deltachain(rev)[0]
2300 if not getattr(r, '_withsparseread', False):
2340 if not getattr(r, '_withsparseread', False):
2301 slicedchain = (chain,)
2341 slicedchain = (chain,)
2302 else:
2342 else:
2303 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2343 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2304 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2344 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2305 rawchunks = getrawchunks(data, slicedchain)
2345 rawchunks = getrawchunks(data, slicedchain)
2306 bins = r._chunks(chain)
2346 bins = r._chunks(chain)
2307 text = bytes(bins[0])
2347 text = bytes(bins[0])
2308 bins = bins[1:]
2348 bins = bins[1:]
2309 text = mdiff.patches(text, bins)
2349 text = mdiff.patches(text, bins)
2310
2350
2311 benches = [
2351 benches = [
2312 (lambda: dorevision(), b'full'),
2352 (lambda: dorevision(), b'full'),
2313 (lambda: dodeltachain(rev), b'deltachain'),
2353 (lambda: dodeltachain(rev), b'deltachain'),
2314 (lambda: doread(chain), b'read'),
2354 (lambda: doread(chain), b'read'),
2315 ]
2355 ]
2316
2356
2317 if getattr(r, '_withsparseread', False):
2357 if getattr(r, '_withsparseread', False):
2318 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2358 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2319 benches.append(slicing)
2359 benches.append(slicing)
2320
2360
2321 benches.extend([
2361 benches.extend([
2322 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2362 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2323 (lambda: dodecompress(rawchunks), b'decompress'),
2363 (lambda: dodecompress(rawchunks), b'decompress'),
2324 (lambda: dopatch(text, bins), b'patch'),
2364 (lambda: dopatch(text, bins), b'patch'),
2325 (lambda: dohash(text), b'hash'),
2365 (lambda: dohash(text), b'hash'),
2326 ])
2366 ])
2327
2367
2328 timer, fm = gettimer(ui, opts)
2368 timer, fm = gettimer(ui, opts)
2329 for fn, title in benches:
2369 for fn, title in benches:
2330 timer(fn, title=title)
2370 timer(fn, title=title)
2331 fm.end()
2371 fm.end()
2332
2372
2333 @command(b'perfrevset',
2373 @command(b'perfrevset',
2334 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2374 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2335 (b'', b'contexts', False, b'obtain changectx for each revision')]
2375 (b'', b'contexts', False, b'obtain changectx for each revision')]
2336 + formatteropts, b"REVSET")
2376 + formatteropts, b"REVSET")
2337 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2377 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2338 """benchmark the execution time of a revset
2378 """benchmark the execution time of a revset
2339
2379
2340 Use the --clear option if you need to evaluate the impact of building the
2380 Use the --clear option if you need to evaluate the impact of building the
2341 volatile revision set caches on revset execution. The volatile caches hold
2381 volatile revision set caches on revset execution. The volatile caches hold
2342 filtered and obsolescence related data."""
2382 filtered and obsolescence related data."""
2343 opts = _byteskwargs(opts)
2383 opts = _byteskwargs(opts)
2344
2384
2345 timer, fm = gettimer(ui, opts)
2385 timer, fm = gettimer(ui, opts)
2346 def d():
2386 def d():
2347 if clear:
2387 if clear:
2348 repo.invalidatevolatilesets()
2388 repo.invalidatevolatilesets()
2349 if contexts:
2389 if contexts:
2350 for ctx in repo.set(expr): pass
2390 for ctx in repo.set(expr): pass
2351 else:
2391 else:
2352 for r in repo.revs(expr): pass
2392 for r in repo.revs(expr): pass
2353 timer(d)
2393 timer(d)
2354 fm.end()
2394 fm.end()
2355
2395
2356 @command(b'perfvolatilesets',
2396 @command(b'perfvolatilesets',
2357 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2397 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2358 ] + formatteropts)
2398 ] + formatteropts)
2359 def perfvolatilesets(ui, repo, *names, **opts):
2399 def perfvolatilesets(ui, repo, *names, **opts):
2360 """benchmark the computation of various volatile set
2400 """benchmark the computation of various volatile set
2361
2401
2362 Volatile set computes element related to filtering and obsolescence."""
2402 Volatile set computes element related to filtering and obsolescence."""
2363 opts = _byteskwargs(opts)
2403 opts = _byteskwargs(opts)
2364 timer, fm = gettimer(ui, opts)
2404 timer, fm = gettimer(ui, opts)
2365 repo = repo.unfiltered()
2405 repo = repo.unfiltered()
2366
2406
2367 def getobs(name):
2407 def getobs(name):
2368 def d():
2408 def d():
2369 repo.invalidatevolatilesets()
2409 repo.invalidatevolatilesets()
2370 if opts[b'clear_obsstore']:
2410 if opts[b'clear_obsstore']:
2371 clearfilecache(repo, b'obsstore')
2411 clearfilecache(repo, b'obsstore')
2372 obsolete.getrevs(repo, name)
2412 obsolete.getrevs(repo, name)
2373 return d
2413 return d
2374
2414
2375 allobs = sorted(obsolete.cachefuncs)
2415 allobs = sorted(obsolete.cachefuncs)
2376 if names:
2416 if names:
2377 allobs = [n for n in allobs if n in names]
2417 allobs = [n for n in allobs if n in names]
2378
2418
2379 for name in allobs:
2419 for name in allobs:
2380 timer(getobs(name), title=name)
2420 timer(getobs(name), title=name)
2381
2421
2382 def getfiltered(name):
2422 def getfiltered(name):
2383 def d():
2423 def d():
2384 repo.invalidatevolatilesets()
2424 repo.invalidatevolatilesets()
2385 if opts[b'clear_obsstore']:
2425 if opts[b'clear_obsstore']:
2386 clearfilecache(repo, b'obsstore')
2426 clearfilecache(repo, b'obsstore')
2387 repoview.filterrevs(repo, name)
2427 repoview.filterrevs(repo, name)
2388 return d
2428 return d
2389
2429
2390 allfilter = sorted(repoview.filtertable)
2430 allfilter = sorted(repoview.filtertable)
2391 if names:
2431 if names:
2392 allfilter = [n for n in allfilter if n in names]
2432 allfilter = [n for n in allfilter if n in names]
2393
2433
2394 for name in allfilter:
2434 for name in allfilter:
2395 timer(getfiltered(name), title=name)
2435 timer(getfiltered(name), title=name)
2396 fm.end()
2436 fm.end()
2397
2437
2398 @command(b'perfbranchmap',
2438 @command(b'perfbranchmap',
2399 [(b'f', b'full', False,
2439 [(b'f', b'full', False,
2400 b'Includes build time of subset'),
2440 b'Includes build time of subset'),
2401 (b'', b'clear-revbranch', False,
2441 (b'', b'clear-revbranch', False,
2402 b'purge the revbranch cache between computation'),
2442 b'purge the revbranch cache between computation'),
2403 ] + formatteropts)
2443 ] + formatteropts)
2404 def perfbranchmap(ui, repo, *filternames, **opts):
2444 def perfbranchmap(ui, repo, *filternames, **opts):
2405 """benchmark the update of a branchmap
2445 """benchmark the update of a branchmap
2406
2446
2407 This benchmarks the full repo.branchmap() call with read and write disabled
2447 This benchmarks the full repo.branchmap() call with read and write disabled
2408 """
2448 """
2409 opts = _byteskwargs(opts)
2449 opts = _byteskwargs(opts)
2410 full = opts.get(b"full", False)
2450 full = opts.get(b"full", False)
2411 clear_revbranch = opts.get(b"clear_revbranch", False)
2451 clear_revbranch = opts.get(b"clear_revbranch", False)
2412 timer, fm = gettimer(ui, opts)
2452 timer, fm = gettimer(ui, opts)
2413 def getbranchmap(filtername):
2453 def getbranchmap(filtername):
2414 """generate a benchmark function for the filtername"""
2454 """generate a benchmark function for the filtername"""
2415 if filtername is None:
2455 if filtername is None:
2416 view = repo
2456 view = repo
2417 else:
2457 else:
2418 view = repo.filtered(filtername)
2458 view = repo.filtered(filtername)
2419 if util.safehasattr(view._branchcaches, '_per_filter'):
2459 if util.safehasattr(view._branchcaches, '_per_filter'):
2420 filtered = view._branchcaches._per_filter
2460 filtered = view._branchcaches._per_filter
2421 else:
2461 else:
2422 # older versions
2462 # older versions
2423 filtered = view._branchcaches
2463 filtered = view._branchcaches
2424 def d():
2464 def d():
2425 if clear_revbranch:
2465 if clear_revbranch:
2426 repo.revbranchcache()._clear()
2466 repo.revbranchcache()._clear()
2427 if full:
2467 if full:
2428 view._branchcaches.clear()
2468 view._branchcaches.clear()
2429 else:
2469 else:
2430 filtered.pop(filtername, None)
2470 filtered.pop(filtername, None)
2431 view.branchmap()
2471 view.branchmap()
2432 return d
2472 return d
2433 # order the filters so that each filter's base subset is handled before it
2473 # order the filters so that each filter's base subset is handled before it
2434 possiblefilters = set(repoview.filtertable)
2474 possiblefilters = set(repoview.filtertable)
2435 if filternames:
2475 if filternames:
2436 possiblefilters &= set(filternames)
2476 possiblefilters &= set(filternames)
2437 subsettable = getbranchmapsubsettable()
2477 subsettable = getbranchmapsubsettable()
2438 allfilters = []
2478 allfilters = []
2439 while possiblefilters:
2479 while possiblefilters:
2440 for name in possiblefilters:
2480 for name in possiblefilters:
2441 subset = subsettable.get(name)
2481 subset = subsettable.get(name)
2442 if subset not in possiblefilters:
2482 if subset not in possiblefilters:
2443 break
2483 break
2444 else:
2484 else:
2445 assert False, b'subset cycle %s!' % possiblefilters
2485 assert False, b'subset cycle %s!' % possiblefilters
2446 allfilters.append(name)
2486 allfilters.append(name)
2447 possiblefilters.remove(name)
2487 possiblefilters.remove(name)
2448
2488
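    # Clarifying note (added): the for/else above orders `allfilters` so that a
    # filter is only appended once its base subset has left `possiblefilters`;
    # reaching the else branch would mean the subset table contains a cycle.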
2449 # warm the cache
2489 # warm the cache
2450 if not full:
2490 if not full:
2451 for name in allfilters:
2491 for name in allfilters:
2452 repo.filtered(name).branchmap()
2492 repo.filtered(name).branchmap()
2453 if not filternames or b'unfiltered' in filternames:
2493 if not filternames or b'unfiltered' in filternames:
2454 # add unfiltered
2494 # add unfiltered
2455 allfilters.append(None)
2495 allfilters.append(None)
2456
2496
2457 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2497 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2458 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2498 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2459 branchcacheread.set(classmethod(lambda *args: None))
2499 branchcacheread.set(classmethod(lambda *args: None))
2460 else:
2500 else:
2461 # older versions
2501 # older versions
2462 branchcacheread = safeattrsetter(branchmap, b'read')
2502 branchcacheread = safeattrsetter(branchmap, b'read')
2463 branchcacheread.set(lambda *args: None)
2503 branchcacheread.set(lambda *args: None)
2464 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2504 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2465 branchcachewrite.set(lambda *args: None)
2505 branchcachewrite.set(lambda *args: None)
2466 try:
2506 try:
2467 for name in allfilters:
2507 for name in allfilters:
2468 printname = name
2508 printname = name
2469 if name is None:
2509 if name is None:
2470 printname = b'unfiltered'
2510 printname = b'unfiltered'
2471 timer(getbranchmap(name), title=str(printname))
2511 timer(getbranchmap(name), title=str(printname))
2472 finally:
2512 finally:
2473 branchcacheread.restore()
2513 branchcacheread.restore()
2474 branchcachewrite.restore()
2514 branchcachewrite.restore()
2475 fm.end()
2515 fm.end()
2476
2516
2477 @command(b'perfbranchmapupdate', [
2517 @command(b'perfbranchmapupdate', [
2478 (b'', b'base', [], b'subset of revision to start from'),
2518 (b'', b'base', [], b'subset of revision to start from'),
2479 (b'', b'target', [], b'subset of revision to end with'),
2519 (b'', b'target', [], b'subset of revision to end with'),
2480 (b'', b'clear-caches', False, b'clear caches between each run')
2520 (b'', b'clear-caches', False, b'clear caches between each run')
2481 ] + formatteropts)
2521 ] + formatteropts)
2482 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2522 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2483 """benchmark branchmap update from for <base> revs to <target> revs
2523 """benchmark branchmap update from for <base> revs to <target> revs
2484
2524
2485 If `--clear-caches` is passed, the following items will be reset before
2525 If `--clear-caches` is passed, the following items will be reset before
2486 each update:
2526 each update:
2487 * the changelog instance and associated indexes
2527 * the changelog instance and associated indexes
2488 * the rev-branch-cache instance
2528 * the rev-branch-cache instance
2489
2529
2490 Examples:
2530 Examples:
2491
2531
2492 # update for the last revision only
2532 # update for the last revision only
2493 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2533 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2494
2534
2495 # update for a change coming with a new branch
2535 # update for a change coming with a new branch
2496 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2536 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2497 """
2537 """
2498 from mercurial import branchmap
2538 from mercurial import branchmap
2499 from mercurial import repoview
2539 from mercurial import repoview
2500 opts = _byteskwargs(opts)
2540 opts = _byteskwargs(opts)
2501 timer, fm = gettimer(ui, opts)
2541 timer, fm = gettimer(ui, opts)
2502 clearcaches = opts[b'clear_caches']
2542 clearcaches = opts[b'clear_caches']
2503 unfi = repo.unfiltered()
2543 unfi = repo.unfiltered()
2504 x = [None] # used to pass data between closures
2544 x = [None] # used to pass data between closures
2505
2545
2506 # we use a `list` here to avoid possible side effect from smartset
2546 # we use a `list` here to avoid possible side effect from smartset
2507 baserevs = list(scmutil.revrange(repo, base))
2547 baserevs = list(scmutil.revrange(repo, base))
2508 targetrevs = list(scmutil.revrange(repo, target))
2548 targetrevs = list(scmutil.revrange(repo, target))
2509 if not baserevs:
2549 if not baserevs:
2510 raise error.Abort(b'no revisions selected for --base')
2550 raise error.Abort(b'no revisions selected for --base')
2511 if not targetrevs:
2551 if not targetrevs:
2512 raise error.Abort(b'no revisions selected for --target')
2552 raise error.Abort(b'no revisions selected for --target')
2513
2553
2514 # make sure the target branchmap also contains the one in the base
2554 # make sure the target branchmap also contains the one in the base
2515 targetrevs = list(set(baserevs) | set(targetrevs))
2555 targetrevs = list(set(baserevs) | set(targetrevs))
2516 targetrevs.sort()
2556 targetrevs.sort()
2517
2557
2518 cl = repo.changelog
2558 cl = repo.changelog
2519 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2559 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2520 allbaserevs.sort()
2560 allbaserevs.sort()
2521 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2561 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2522
2562
2523 newrevs = list(alltargetrevs.difference(allbaserevs))
2563 newrevs = list(alltargetrevs.difference(allbaserevs))
2524 newrevs.sort()
2564 newrevs.sort()
2525
2565
2526 allrevs = frozenset(unfi.changelog.revs())
2566 allrevs = frozenset(unfi.changelog.revs())
2527 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2567 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2528 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2568 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2529
2569
2530 def basefilter(repo, visibilityexceptions=None):
2570 def basefilter(repo, visibilityexceptions=None):
2531 return basefilterrevs
2571 return basefilterrevs
2532
2572
2533 def targetfilter(repo, visibilityexceptions=None):
2573 def targetfilter(repo, visibilityexceptions=None):
2534 return targetfilterrevs
2574 return targetfilterrevs
2535
2575
2536 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2576 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2537 ui.status(msg % (len(allbaserevs), len(newrevs)))
2577 ui.status(msg % (len(allbaserevs), len(newrevs)))
2538 if targetfilterrevs:
2578 if targetfilterrevs:
2539 msg = b'(%d revisions still filtered)\n'
2579 msg = b'(%d revisions still filtered)\n'
2540 ui.status(msg % len(targetfilterrevs))
2580 ui.status(msg % len(targetfilterrevs))
2541
2581
2542 try:
2582 try:
2543 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2583 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2544 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2584 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2545
2585
2546 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2586 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2547 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2587 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2548
2588
2549 # try to find an existing branchmap to reuse
2589 # try to find an existing branchmap to reuse
2550 subsettable = getbranchmapsubsettable()
2590 subsettable = getbranchmapsubsettable()
2551 candidatefilter = subsettable.get(None)
2591 candidatefilter = subsettable.get(None)
2552 while candidatefilter is not None:
2592 while candidatefilter is not None:
2553 candidatebm = repo.filtered(candidatefilter).branchmap()
2593 candidatebm = repo.filtered(candidatefilter).branchmap()
2554 if candidatebm.validfor(baserepo):
2594 if candidatebm.validfor(baserepo):
2555 filtered = repoview.filterrevs(repo, candidatefilter)
2595 filtered = repoview.filterrevs(repo, candidatefilter)
2556 missing = [r for r in allbaserevs if r in filtered]
2596 missing = [r for r in allbaserevs if r in filtered]
2557 base = candidatebm.copy()
2597 base = candidatebm.copy()
2558 base.update(baserepo, missing)
2598 base.update(baserepo, missing)
2559 break
2599 break
2560 candidatefilter = subsettable.get(candidatefilter)
2600 candidatefilter = subsettable.get(candidatefilter)
2561 else:
2601 else:
2562 # no suitable subset was found
2602 # no suitable subset was found
2563 base = branchmap.branchcache()
2603 base = branchmap.branchcache()
2564 base.update(baserepo, allbaserevs)
2604 base.update(baserepo, allbaserevs)
2565
2605
2566 def setup():
2606 def setup():
2567 x[0] = base.copy()
2607 x[0] = base.copy()
2568 if clearcaches:
2608 if clearcaches:
2569 unfi._revbranchcache = None
2609 unfi._revbranchcache = None
2570 clearchangelog(repo)
2610 clearchangelog(repo)
2571
2611
2572 def bench():
2612 def bench():
2573 x[0].update(targetrepo, newrevs)
2613 x[0].update(targetrepo, newrevs)
2574
2614
2575 timer(bench, setup=setup)
2615 timer(bench, setup=setup)
2576 fm.end()
2616 fm.end()
2577 finally:
2617 finally:
2578 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2618 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2579 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2619 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2580
2620
2581 @command(b'perfbranchmapload', [
2621 @command(b'perfbranchmapload', [
2582 (b'f', b'filter', b'', b'Specify repoview filter'),
2622 (b'f', b'filter', b'', b'Specify repoview filter'),
2583 (b'', b'list', False, b'List branchmap filter caches'),
2623 (b'', b'list', False, b'List branchmap filter caches'),
2584 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2624 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2585
2625
2586 ] + formatteropts)
2626 ] + formatteropts)
2587 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2627 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2588 """benchmark reading the branchmap"""
2628 """benchmark reading the branchmap"""
2589 opts = _byteskwargs(opts)
2629 opts = _byteskwargs(opts)
2590 clearrevlogs = opts[b'clear_revlogs']
2630 clearrevlogs = opts[b'clear_revlogs']
2591
2631
2592 if list:
2632 if list:
2593 for name, kind, st in repo.cachevfs.readdir(stat=True):
2633 for name, kind, st in repo.cachevfs.readdir(stat=True):
2594 if name.startswith(b'branch2'):
2634 if name.startswith(b'branch2'):
2595 filtername = name.partition(b'-')[2] or b'unfiltered'
2635 filtername = name.partition(b'-')[2] or b'unfiltered'
2596 ui.status(b'%s - %s\n'
2636 ui.status(b'%s - %s\n'
2597 % (filtername, util.bytecount(st.st_size)))
2637 % (filtername, util.bytecount(st.st_size)))
2598 return
2638 return
2599 if not filter:
2639 if not filter:
2600 filter = None
2640 filter = None
2601 subsettable = getbranchmapsubsettable()
2641 subsettable = getbranchmapsubsettable()
2602 if filter is None:
2642 if filter is None:
2603 repo = repo.unfiltered()
2643 repo = repo.unfiltered()
2604 else:
2644 else:
2605 repo = repoview.repoview(repo, filter)
2645 repo = repoview.repoview(repo, filter)
2606
2646
2607 repo.branchmap() # make sure we have a relevant, up to date branchmap
2647 repo.branchmap() # make sure we have a relevant, up to date branchmap
2608
2648
2609 try:
2649 try:
2610 fromfile = branchmap.branchcache.fromfile
2650 fromfile = branchmap.branchcache.fromfile
2611 except AttributeError:
2651 except AttributeError:
2612 # older versions
2652 # older versions
2613 fromfile = branchmap.read
2653 fromfile = branchmap.read
2614
2654
2615 currentfilter = filter
2655 currentfilter = filter
2616 # try once without timer, the filter may not be cached
2656 # try once without timer, the filter may not be cached
2617 while fromfile(repo) is None:
2657 while fromfile(repo) is None:
2618 currentfilter = subsettable.get(currentfilter)
2658 currentfilter = subsettable.get(currentfilter)
2619 if currentfilter is None:
2659 if currentfilter is None:
2620 raise error.Abort(b'No branchmap cached for %s repo'
2660 raise error.Abort(b'No branchmap cached for %s repo'
2621 % (filter or b'unfiltered'))
2661 % (filter or b'unfiltered'))
2622 repo = repo.filtered(currentfilter)
2662 repo = repo.filtered(currentfilter)
2623 timer, fm = gettimer(ui, opts)
2663 timer, fm = gettimer(ui, opts)
2624 def setup():
2664 def setup():
2625 if clearrevlogs:
2665 if clearrevlogs:
2626 clearchangelog(repo)
2666 clearchangelog(repo)
2627 def bench():
2667 def bench():
2628 fromfile(repo)
2668 fromfile(repo)
2629 timer(bench, setup=setup)
2669 timer(bench, setup=setup)
2630 fm.end()
2670 fm.end()
2631
2671
2632 @command(b'perfloadmarkers')
2672 @command(b'perfloadmarkers')
2633 def perfloadmarkers(ui, repo):
2673 def perfloadmarkers(ui, repo):
2634 """benchmark the time to parse the on-disk markers for a repo
2674 """benchmark the time to parse the on-disk markers for a repo
2635
2675
2636 Result is the number of markers in the repo."""
2676 Result is the number of markers in the repo."""
2637 timer, fm = gettimer(ui)
2677 timer, fm = gettimer(ui)
2638 svfs = getsvfs(repo)
2678 svfs = getsvfs(repo)
2639 timer(lambda: len(obsolete.obsstore(svfs)))
2679 timer(lambda: len(obsolete.obsstore(svfs)))
2640 fm.end()
2680 fm.end()
2641
2681
2642 @command(b'perflrucachedict', formatteropts +
2682 @command(b'perflrucachedict', formatteropts +
2643 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2683 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2644 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2684 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2645 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2685 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2646 (b'', b'size', 4, b'size of cache'),
2686 (b'', b'size', 4, b'size of cache'),
2647 (b'', b'gets', 10000, b'number of key lookups'),
2687 (b'', b'gets', 10000, b'number of key lookups'),
2648 (b'', b'sets', 10000, b'number of key sets'),
2688 (b'', b'sets', 10000, b'number of key sets'),
2649 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2689 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2650 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2690 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2651 norepo=True)
2691 norepo=True)
2652 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2692 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2653 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2693 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2654 opts = _byteskwargs(opts)
2694 opts = _byteskwargs(opts)
2655
2695
2656 def doinit():
2696 def doinit():
2657 for i in _xrange(10000):
2697 for i in _xrange(10000):
2658 util.lrucachedict(size)
2698 util.lrucachedict(size)
2659
2699
2660 costrange = list(range(mincost, maxcost + 1))
2700 costrange = list(range(mincost, maxcost + 1))
2661
2701
2662 values = []
2702 values = []
2663 for i in _xrange(size):
2703 for i in _xrange(size):
2664 values.append(random.randint(0, _maxint))
2704 values.append(random.randint(0, _maxint))
2665
2705
2666 # Get mode fills the cache and tests raw lookup performance with no
2706 # Get mode fills the cache and tests raw lookup performance with no
2667 # eviction.
2707 # eviction.
2668 getseq = []
2708 getseq = []
2669 for i in _xrange(gets):
2709 for i in _xrange(gets):
2670 getseq.append(random.choice(values))
2710 getseq.append(random.choice(values))
2671
2711
2672 def dogets():
2712 def dogets():
2673 d = util.lrucachedict(size)
2713 d = util.lrucachedict(size)
2674 for v in values:
2714 for v in values:
2675 d[v] = v
2715 d[v] = v
2676 for key in getseq:
2716 for key in getseq:
2677 value = d[key]
2717 value = d[key]
2678 value # silence pyflakes warning
2718 value # silence pyflakes warning
2679
2719
2680 def dogetscost():
2720 def dogetscost():
2681 d = util.lrucachedict(size, maxcost=costlimit)
2721 d = util.lrucachedict(size, maxcost=costlimit)
2682 for i, v in enumerate(values):
2722 for i, v in enumerate(values):
2683 d.insert(v, v, cost=costs[i])
2723 d.insert(v, v, cost=costs[i])
2684 for key in getseq:
2724 for key in getseq:
2685 try:
2725 try:
2686 value = d[key]
2726 value = d[key]
2687 value # silence pyflakes warning
2727 value # silence pyflakes warning
2688 except KeyError:
2728 except KeyError:
2689 pass
2729 pass
2690
2730
2691 # Set mode tests insertion speed with cache eviction.
2731 # Set mode tests insertion speed with cache eviction.
2692 setseq = []
2732 setseq = []
2693 costs = []
2733 costs = []
2694 for i in _xrange(sets):
2734 for i in _xrange(sets):
2695 setseq.append(random.randint(0, _maxint))
2735 setseq.append(random.randint(0, _maxint))
2696 costs.append(random.choice(costrange))
2736 costs.append(random.choice(costrange))
2697
2737
2698 def doinserts():
2738 def doinserts():
2699 d = util.lrucachedict(size)
2739 d = util.lrucachedict(size)
2700 for v in setseq:
2740 for v in setseq:
2701 d.insert(v, v)
2741 d.insert(v, v)
2702
2742
2703 def doinsertscost():
2743 def doinsertscost():
2704 d = util.lrucachedict(size, maxcost=costlimit)
2744 d = util.lrucachedict(size, maxcost=costlimit)
2705 for i, v in enumerate(setseq):
2745 for i, v in enumerate(setseq):
2706 d.insert(v, v, cost=costs[i])
2746 d.insert(v, v, cost=costs[i])
2707
2747
2708 def dosets():
2748 def dosets():
2709 d = util.lrucachedict(size)
2749 d = util.lrucachedict(size)
2710 for v in setseq:
2750 for v in setseq:
2711 d[v] = v
2751 d[v] = v
2712
2752
2713 # Mixed mode randomly performs gets and sets with eviction.
2753 # Mixed mode randomly performs gets and sets with eviction.
2714 mixedops = []
2754 mixedops = []
2715 for i in _xrange(mixed):
2755 for i in _xrange(mixed):
2716 r = random.randint(0, 100)
2756 r = random.randint(0, 100)
2717 if r < mixedgetfreq:
2757 if r < mixedgetfreq:
2718 op = 0
2758 op = 0
2719 else:
2759 else:
2720 op = 1
2760 op = 1
2721
2761
2722 mixedops.append((op,
2762 mixedops.append((op,
2723 random.randint(0, size * 2),
2763 random.randint(0, size * 2),
2724 random.choice(costrange)))
2764 random.choice(costrange)))
2725
2765
2726 def domixed():
2766 def domixed():
2727 d = util.lrucachedict(size)
2767 d = util.lrucachedict(size)
2728
2768
2729 for op, v, cost in mixedops:
2769 for op, v, cost in mixedops:
2730 if op == 0:
2770 if op == 0:
2731 try:
2771 try:
2732 d[v]
2772 d[v]
2733 except KeyError:
2773 except KeyError:
2734 pass
2774 pass
2735 else:
2775 else:
2736 d[v] = v
2776 d[v] = v
2737
2777
2738 def domixedcost():
2778 def domixedcost():
2739 d = util.lrucachedict(size, maxcost=costlimit)
2779 d = util.lrucachedict(size, maxcost=costlimit)
2740
2780
2741 for op, v, cost in mixedops:
2781 for op, v, cost in mixedops:
2742 if op == 0:
2782 if op == 0:
2743 try:
2783 try:
2744 d[v]
2784 d[v]
2745 except KeyError:
2785 except KeyError:
2746 pass
2786 pass
2747 else:
2787 else:
2748 d.insert(v, v, cost=cost)
2788 d.insert(v, v, cost=cost)
2749
2789
2750 benches = [
2790 benches = [
2751 (doinit, b'init'),
2791 (doinit, b'init'),
2752 ]
2792 ]
2753
2793
2754 if costlimit:
2794 if costlimit:
2755 benches.extend([
2795 benches.extend([
2756 (dogetscost, b'gets w/ cost limit'),
2796 (dogetscost, b'gets w/ cost limit'),
2757 (doinsertscost, b'inserts w/ cost limit'),
2797 (doinsertscost, b'inserts w/ cost limit'),
2758 (domixedcost, b'mixed w/ cost limit'),
2798 (domixedcost, b'mixed w/ cost limit'),
2759 ])
2799 ])
2760 else:
2800 else:
2761 benches.extend([
2801 benches.extend([
2762 (dogets, b'gets'),
2802 (dogets, b'gets'),
2763 (doinserts, b'inserts'),
2803 (doinserts, b'inserts'),
2764 (dosets, b'sets'),
2804 (dosets, b'sets'),
2765 (domixed, b'mixed')
2805 (domixed, b'mixed')
2766 ])
2806 ])
2767
2807
2768 for fn, title in benches:
2808 for fn, title in benches:
2769 timer, fm = gettimer(ui, opts)
2809 timer, fm = gettimer(ui, opts)
2770 timer(fn, title=title)
2810 timer(fn, title=title)
2771 fm.end()
2811 fm.end()
2772
2812
2773 @command(b'perfwrite', formatteropts)
2813 @command(b'perfwrite', formatteropts)
2774 def perfwrite(ui, repo, **opts):
2814 def perfwrite(ui, repo, **opts):
2775 """microbenchmark ui.write
2815 """microbenchmark ui.write
2776 """
2816 """
2777 opts = _byteskwargs(opts)
2817 opts = _byteskwargs(opts)
2778
2818
2779 timer, fm = gettimer(ui, opts)
2819 timer, fm = gettimer(ui, opts)
2780 def write():
2820 def write():
2781 for i in range(100000):
2821 for i in range(100000):
2782 ui.write((b'Testing write performance\n'))
2822 ui.write((b'Testing write performance\n'))
2783 timer(write)
2823 timer(write)
2784 fm.end()
2824 fm.end()
2785
2825
2786 def uisetup(ui):
2826 def uisetup(ui):
2787 if (util.safehasattr(cmdutil, b'openrevlog') and
2827 if (util.safehasattr(cmdutil, b'openrevlog') and
2788 not util.safehasattr(commands, b'debugrevlogopts')):
2828 not util.safehasattr(commands, b'debugrevlogopts')):
2789 # for "historical portability":
2829 # for "historical portability":
2790 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2830 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2791 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2831 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2792 # openrevlog() should cause failure, because it has been
2832 # openrevlog() should cause failure, because it has been
2793 # available since 3.5 (or 49c583ca48c4).
2833 # available since 3.5 (or 49c583ca48c4).
2794 def openrevlog(orig, repo, cmd, file_, opts):
2834 def openrevlog(orig, repo, cmd, file_, opts):
2795 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2835 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2796 raise error.Abort(b"This version doesn't support --dir option",
2836 raise error.Abort(b"This version doesn't support --dir option",
2797 hint=b"use 3.5 or later")
2837 hint=b"use 3.5 or later")
2798 return orig(repo, cmd, file_, opts)
2838 return orig(repo, cmd, file_, opts)
2799 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2839 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2800
2840
2801 @command(b'perfprogress', formatteropts + [
2841 @command(b'perfprogress', formatteropts + [
2802 (b'', b'topic', b'topic', b'topic for progress messages'),
2842 (b'', b'topic', b'topic', b'topic for progress messages'),
2803 (b'c', b'total', 1000000, b'total value we are progressing to'),
2843 (b'c', b'total', 1000000, b'total value we are progressing to'),
2804 ], norepo=True)
2844 ], norepo=True)
2805 def perfprogress(ui, topic=None, total=None, **opts):
2845 def perfprogress(ui, topic=None, total=None, **opts):
2806 """printing of progress bars"""
2846 """printing of progress bars"""
2807 opts = _byteskwargs(opts)
2847 opts = _byteskwargs(opts)
2808
2848
2809 timer, fm = gettimer(ui, opts)
2849 timer, fm = gettimer(ui, opts)
2810
2850
2811 def doprogress():
2851 def doprogress():
2812 with ui.makeprogress(topic, total=total) as progress:
2852 with ui.makeprogress(topic, total=total) as progress:
2813 for i in pycompat.xrange(total):
2853 for i in pycompat.xrange(total):
2814 progress.increment()
2854 progress.increment()
2815
2855
2816 timer(doprogress)
2856 timer(doprogress)
2817 fm.end()
2857 fm.end()
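(The timing loop that consumes the new `perf.run-limits` pairs sits earlier in perf.py than the hunks shown here. As a rough, hypothetical sketch of the documented stop condition, once <time> seconds have elapsed and <numberofrun> iterations have completed, the benchmark stops; the names below are illustrative, not the extension's actual helpers.)

import time

# Hypothetical sketch of the perf.run-limits stop condition: each
# (seconds, count) pair means "once <seconds> have elapsed and at least
# <count> runs were done, stop". Default documented value: 3.0-100, 10.0-3.
DEFAULT_LIMITS = [(3.0, 100), (10.0, 3)]

def run_benchmark(func, limits=DEFAULT_LIMITS):
    timings = []
    begin = time.time()
    while True:
        start = time.time()
        func()
        timings.append(time.time() - start)
        elapsed = time.time() - begin
        # stop as soon as any limit pair is fully satisfied
        if any(elapsed >= t and len(timings) >= n for t, n in limits):
            break
    return timings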
@@ -1,320 +1,355 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistic will be reported for each benchmark: best,
51 When set, additional statistic will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of run (default: 1)
56 number of second to wait before any group of run (default: 1)
57
57
58 "run-limits"
59 Control the number of run each benchmark will perform. The option value
60 should be a list of '<time>-<numberofrun>' pairs. After each run the
61 condition are considered in order with the following logic:
62
63 If benchmark have been running for <time> seconds, and we have performed
64 <numberofrun> iterations, stop the benchmark,
65
66 The default value is: '3.0-100, 10.0-3'
67
58 "stub"
68 "stub"
59 When set, benchmark will only be run once, useful for testing (default:
69 When set, benchmark will only be run once, useful for testing (default:
60 off)
70 off)
61
71
62 list of commands:
72 list of commands:
63
73
64 perfaddremove
74 perfaddremove
65 (no help text available)
75 (no help text available)
66 perfancestors
76 perfancestors
67 (no help text available)
77 (no help text available)
68 perfancestorset
78 perfancestorset
69 (no help text available)
79 (no help text available)
70 perfannotate (no help text available)
80 perfannotate (no help text available)
71 perfbdiff benchmark a bdiff between revisions
81 perfbdiff benchmark a bdiff between revisions
72 perfbookmarks
82 perfbookmarks
73 benchmark parsing bookmarks from disk to memory
83 benchmark parsing bookmarks from disk to memory
74 perfbranchmap
84 perfbranchmap
75 benchmark the update of a branchmap
85 benchmark the update of a branchmap
76 perfbranchmapload
86 perfbranchmapload
77 benchmark reading the branchmap
87 benchmark reading the branchmap
78 perfbranchmapupdate
88 perfbranchmapupdate
79 benchmark branchmap update from for <base> revs to <target>
89 benchmark branchmap update from for <base> revs to <target>
80 revs
90 revs
81 perfbundleread
91 perfbundleread
82 Benchmark reading of bundle files.
92 Benchmark reading of bundle files.
83 perfcca (no help text available)
93 perfcca (no help text available)
84 perfchangegroupchangelog
94 perfchangegroupchangelog
85 Benchmark producing a changelog group for a changegroup.
95 Benchmark producing a changelog group for a changegroup.
86 perfchangeset
96 perfchangeset
87 (no help text available)
97 (no help text available)
88 perfctxfiles (no help text available)
98 perfctxfiles (no help text available)
89 perfdiffwd Profile diff of working directory changes
99 perfdiffwd Profile diff of working directory changes
90 perfdirfoldmap
100 perfdirfoldmap
91 (no help text available)
101 (no help text available)
92 perfdirs (no help text available)
102 perfdirs (no help text available)
93 perfdirstate (no help text available)
103 perfdirstate (no help text available)
94 perfdirstatedirs
104 perfdirstatedirs
95 (no help text available)
105 (no help text available)
96 perfdirstatefoldmap
106 perfdirstatefoldmap
97 (no help text available)
107 (no help text available)
98 perfdirstatewrite
108 perfdirstatewrite
99 (no help text available)
109 (no help text available)
100 perfdiscovery
110 perfdiscovery
101 benchmark discovery between local repo and the peer at given
111 benchmark discovery between local repo and the peer at given
102 path
112 path
103 perffncacheencode
113 perffncacheencode
104 (no help text available)
114 (no help text available)
105 perffncacheload
115 perffncacheload
106 (no help text available)
116 (no help text available)
107 perffncachewrite
117 perffncachewrite
108 (no help text available)
118 (no help text available)
109 perfheads benchmark the computation of a changelog heads
119 perfheads benchmark the computation of a changelog heads
110 perfhelper-pathcopies
120 perfhelper-pathcopies
111 find statistic about potential parameters for the
121 find statistic about potential parameters for the
112 'perftracecopies'
122 'perftracecopies'
113 perfignore benchmark operation related to computing ignore
123 perfignore benchmark operation related to computing ignore
114 perfindex benchmark index creation time followed by a lookup
124 perfindex benchmark index creation time followed by a lookup
115 perflinelogedits
125 perflinelogedits
116 (no help text available)
126 (no help text available)
117 perfloadmarkers
127 perfloadmarkers
118 benchmark the time to parse the on-disk markers for a repo
128 benchmark the time to parse the on-disk markers for a repo
119 perflog (no help text available)
129 perflog (no help text available)
120 perflookup (no help text available)
130 perflookup (no help text available)
121 perflrucachedict
131 perflrucachedict
122 (no help text available)
132 (no help text available)
123 perfmanifest benchmark the time to read a manifest from disk and return a
133 perfmanifest benchmark the time to read a manifest from disk and return a
124 usable
134 usable
125 perfmergecalculate
135 perfmergecalculate
126 (no help text available)
136 (no help text available)
127 perfmoonwalk benchmark walking the changelog backwards
137 perfmoonwalk benchmark walking the changelog backwards
128 perfnodelookup
138 perfnodelookup
129 (no help text available)
139 (no help text available)
130 perfnodemap benchmark the time necessary to look up revision from a cold
140 perfnodemap benchmark the time necessary to look up revision from a cold
131 nodemap
141 nodemap
132 perfparents benchmark the time necessary to fetch one changeset's parents.
142 perfparents benchmark the time necessary to fetch one changeset's parents.
133 perfpathcopies
143 perfpathcopies
134 benchmark the copy tracing logic
144 benchmark the copy tracing logic
135 perfphases benchmark phasesets computation
145 perfphases benchmark phasesets computation
136 perfphasesremote
146 perfphasesremote
137 benchmark time needed to analyse phases of the remote server
147 benchmark time needed to analyse phases of the remote server
138 perfprogress printing of progress bars
148 perfprogress printing of progress bars
139 perfrawfiles (no help text available)
149 perfrawfiles (no help text available)
140 perfrevlogchunks
150 perfrevlogchunks
141 Benchmark operations on revlog chunks.
151 Benchmark operations on revlog chunks.
142 perfrevlogindex
152 perfrevlogindex
143 Benchmark operations against a revlog index.
153 Benchmark operations against a revlog index.
144 perfrevlogrevision
154 perfrevlogrevision
145 Benchmark obtaining a revlog revision.
155 Benchmark obtaining a revlog revision.
146 perfrevlogrevisions
156 perfrevlogrevisions
147 Benchmark reading a series of revisions from a revlog.
157 Benchmark reading a series of revisions from a revlog.
148 perfrevlogwrite
158 perfrevlogwrite
149 Benchmark writing a series of revisions to a revlog.
159 Benchmark writing a series of revisions to a revlog.
150 perfrevrange (no help text available)
160 perfrevrange (no help text available)
151 perfrevset benchmark the execution time of a revset
161 perfrevset benchmark the execution time of a revset
152 perfstartup (no help text available)
162 perfstartup (no help text available)
153 perfstatus (no help text available)
163 perfstatus (no help text available)
154 perftags (no help text available)
164 perftags (no help text available)
155 perftemplating
165 perftemplating
156 test the rendering time of a given template
166 test the rendering time of a given template
157 perfunidiff benchmark a unified diff between revisions
167 perfunidiff benchmark a unified diff between revisions
158 perfvolatilesets
168 perfvolatilesets
159 benchmark the computation of various volatile set
169 benchmark the computation of various volatile set
160 perfwalk (no help text available)
170 perfwalk (no help text available)
161 perfwrite microbenchmark ui.write
171 perfwrite microbenchmark ui.write
162
172
163 (use 'hg help -v perf' to show built-in aliases and global options)
173 (use 'hg help -v perf' to show built-in aliases and global options)
164 $ hg perfaddremove
174 $ hg perfaddremove
165 $ hg perfancestors
175 $ hg perfancestors
166 $ hg perfancestorset 2
176 $ hg perfancestorset 2
167 $ hg perfannotate a
177 $ hg perfannotate a
168 $ hg perfbdiff -c 1
178 $ hg perfbdiff -c 1
169 $ hg perfbdiff --alldata 1
179 $ hg perfbdiff --alldata 1
170 $ hg perfunidiff -c 1
180 $ hg perfunidiff -c 1
171 $ hg perfunidiff --alldata 1
181 $ hg perfunidiff --alldata 1
172 $ hg perfbookmarks
182 $ hg perfbookmarks
173 $ hg perfbranchmap
183 $ hg perfbranchmap
174 $ hg perfbranchmapload
184 $ hg perfbranchmapload
175 $ hg perfbranchmapupdate --base "not tip" --target "tip"
185 $ hg perfbranchmapupdate --base "not tip" --target "tip"
176 benchmark of branchmap with 3 revisions with 1 new ones
186 benchmark of branchmap with 3 revisions with 1 new ones
177 $ hg perfcca
187 $ hg perfcca
178 $ hg perfchangegroupchangelog
188 $ hg perfchangegroupchangelog
179 $ hg perfchangegroupchangelog --cgversion 01
189 $ hg perfchangegroupchangelog --cgversion 01
180 $ hg perfchangeset 2
190 $ hg perfchangeset 2
181 $ hg perfctxfiles 2
191 $ hg perfctxfiles 2
182 $ hg perfdiffwd
192 $ hg perfdiffwd
183 $ hg perfdirfoldmap
193 $ hg perfdirfoldmap
184 $ hg perfdirs
194 $ hg perfdirs
185 $ hg perfdirstate
195 $ hg perfdirstate
186 $ hg perfdirstatedirs
196 $ hg perfdirstatedirs
187 $ hg perfdirstatefoldmap
197 $ hg perfdirstatefoldmap
188 $ hg perfdirstatewrite
198 $ hg perfdirstatewrite
189 #if repofncache
199 #if repofncache
190 $ hg perffncacheencode
200 $ hg perffncacheencode
191 $ hg perffncacheload
201 $ hg perffncacheload
192 $ hg debugrebuildfncache
202 $ hg debugrebuildfncache
193 fncache already up to date
203 fncache already up to date
194 $ hg perffncachewrite
204 $ hg perffncachewrite
195 $ hg debugrebuildfncache
205 $ hg debugrebuildfncache
196 fncache already up to date
206 fncache already up to date
197 #endif
207 #endif
198 $ hg perfheads
208 $ hg perfheads
199 $ hg perfignore
209 $ hg perfignore
200 $ hg perfindex
210 $ hg perfindex
201 $ hg perflinelogedits -n 1
211 $ hg perflinelogedits -n 1
202 $ hg perfloadmarkers
212 $ hg perfloadmarkers
203 $ hg perflog
213 $ hg perflog
204 $ hg perflookup 2
214 $ hg perflookup 2
205 $ hg perflrucache
215 $ hg perflrucache
206 $ hg perfmanifest 2
216 $ hg perfmanifest 2
207 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
217 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
208 $ hg perfmanifest -m 44fe2c8352bb
218 $ hg perfmanifest -m 44fe2c8352bb
209 abort: manifest revision must be integer or full node
219 abort: manifest revision must be integer or full node
210 [255]
220 [255]
211 $ hg perfmergecalculate -r 3
221 $ hg perfmergecalculate -r 3
212 $ hg perfmoonwalk
222 $ hg perfmoonwalk
213 $ hg perfnodelookup 2
223 $ hg perfnodelookup 2
214 $ hg perfpathcopies 1 2
224 $ hg perfpathcopies 1 2
215 $ hg perfprogress --total 1000
225 $ hg perfprogress --total 1000
216 $ hg perfrawfiles 2
226 $ hg perfrawfiles 2
217 $ hg perfrevlogindex -c
227 $ hg perfrevlogindex -c
218 #if reporevlogstore
228 #if reporevlogstore
219 $ hg perfrevlogrevisions .hg/store/data/a.i
229 $ hg perfrevlogrevisions .hg/store/data/a.i
220 #endif
230 #endif
221 $ hg perfrevlogrevision -m 0
231 $ hg perfrevlogrevision -m 0
222 $ hg perfrevlogchunks -c
232 $ hg perfrevlogchunks -c
223 $ hg perfrevrange
233 $ hg perfrevrange
224 $ hg perfrevset 'all()'
234 $ hg perfrevset 'all()'
225 $ hg perfstartup
235 $ hg perfstartup
226 $ hg perfstatus
236 $ hg perfstatus
227 $ hg perftags
237 $ hg perftags
228 $ hg perftemplating
238 $ hg perftemplating
229 $ hg perfvolatilesets
239 $ hg perfvolatilesets
230 $ hg perfwalk
240 $ hg perfwalk
231 $ hg perfparents
241 $ hg perfparents
232 $ hg perfdiscovery -q .
242 $ hg perfdiscovery -q .
233
243
244 Test run control
245 ----------------
246
247 Simple single entry
248
249 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
250 ! wall * comb * user * sys * (best of 15) (glob)
251
252 Multiple entries
253
254 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
255 ! wall * comb * user * sys * (best of 5) (glob)
256
257 error case are ignored
258
259 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
260 malformatted run limit entry, missing "-": 500
261 ! wall * comb * user * sys * (best of 5) (glob)
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
263 malformatted run limit entry, could not convert string to float: aaa: aaa-12
264 ! wall * comb * user * sys * (best of 5) (glob)
265 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
266 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
267 ! wall * comb * user * sys * (best of 5) (glob)
268
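The error cases above show that malformed run-limit entries are reported and skipped rather than aborting the benchmark. A minimal, hypothetical sketch of that validation (the real parsing lives earlier in perf.py; the names here are illustrative):

def parse_run_limits(value):
    # Illustrative only: split a comma-separated list of <time>-<numberofrun>
    # pairs, reporting and skipping malformed entries, similar to the test
    # output above.
    limits = []
    for entry in (e.strip() for e in value.split(',')):
        if '-' not in entry:
            print('malformatted run limit entry, missing "-": %s' % entry)
            continue
        time_part, count_part = entry.split('-', 1)
        try:
            limits.append((float(time_part), int(count_part)))
        except ValueError as exc:
            print('malformatted run limit entry, %s: %s' % (exc, entry))
    return limits

# e.g. parse_run_limits('500, 0.000000001-5') keeps only (1e-09, 5)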
234 test actual output
269 test actual output
235 ------------------
270 ------------------
236
271
237 normal output:
272 normal output:
238
273
239 $ hg perfheads --config perf.stub=no
274 $ hg perfheads --config perf.stub=no
240 ! wall * comb * user * sys * (best of *) (glob)
275 ! wall * comb * user * sys * (best of *) (glob)
241
276
242 detailed output:
277 detailed output:
243
278
244 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
279 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
245 ! wall * comb * user * sys * (best of *) (glob)
280 ! wall * comb * user * sys * (best of *) (glob)
246 ! wall * comb * user * sys * (max of *) (glob)
281 ! wall * comb * user * sys * (max of *) (glob)
247 ! wall * comb * user * sys * (avg of *) (glob)
282 ! wall * comb * user * sys * (avg of *) (glob)
248 ! wall * comb * user * sys * (median of *) (glob)
283 ! wall * comb * user * sys * (median of *) (glob)
249
284
250 test json output
285 test json output
251 ----------------
286 ----------------
252
287
253 normal output:
288 normal output:
254
289
255 $ hg perfheads --template json --config perf.stub=no
290 $ hg perfheads --template json --config perf.stub=no
256 [
291 [
257 {
292 {
258 "comb": *, (glob)
293 "comb": *, (glob)
259 "count": *, (glob)
294 "count": *, (glob)
260 "sys": *, (glob)
295 "sys": *, (glob)
261 "user": *, (glob)
296 "user": *, (glob)
262 "wall": * (glob)
297 "wall": * (glob)
263 }
298 }
264 ]
299 ]
265
300
266 detailed output:
301 detailed output:
267
302
268 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
303 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
269 [
304 [
270 {
305 {
271 "avg.comb": *, (glob)
306 "avg.comb": *, (glob)
272 "avg.count": *, (glob)
307 "avg.count": *, (glob)
273 "avg.sys": *, (glob)
308 "avg.sys": *, (glob)
274 "avg.user": *, (glob)
309 "avg.user": *, (glob)
275 "avg.wall": *, (glob)
310 "avg.wall": *, (glob)
276 "comb": *, (glob)
311 "comb": *, (glob)
277 "count": *, (glob)
312 "count": *, (glob)
278 "max.comb": *, (glob)
313 "max.comb": *, (glob)
279 "max.count": *, (glob)
314 "max.count": *, (glob)
280 "max.sys": *, (glob)
315 "max.sys": *, (glob)
281 "max.user": *, (glob)
316 "max.user": *, (glob)
282 "max.wall": *, (glob)
317 "max.wall": *, (glob)
283 "median.comb": *, (glob)
318 "median.comb": *, (glob)
284 "median.count": *, (glob)
319 "median.count": *, (glob)
285 "median.sys": *, (glob)
320 "median.sys": *, (glob)
286 "median.user": *, (glob)
321 "median.user": *, (glob)
287 "median.wall": *, (glob)
322 "median.wall": *, (glob)
288 "sys": *, (glob)
323 "sys": *, (glob)
289 "user": *, (glob)
324 "user": *, (glob)
290 "wall": * (glob)
325 "wall": * (glob)
291 }
326 }
292 ]
327 ]
293
328
294 Check perf.py for historical portability
329 Check perf.py for historical portability
295 ----------------------------------------
330 ----------------------------------------
296
331
297 $ cd "$TESTDIR/.."
332 $ cd "$TESTDIR/.."
298
333
299 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
334 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
300 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
335 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
301 > "$TESTDIR"/check-perf-code.py contrib/perf.py
336 > "$TESTDIR"/check-perf-code.py contrib/perf.py
302 contrib/perf.py:\d+: (re)
337 contrib/perf.py:\d+: (re)
303 > from mercurial import (
338 > from mercurial import (
304 import newer module separately in try clause for early Mercurial
339 import newer module separately in try clause for early Mercurial
305 contrib/perf.py:\d+: (re)
340 contrib/perf.py:\d+: (re)
306 > from mercurial import (
341 > from mercurial import (
307 import newer module separately in try clause for early Mercurial
342 import newer module separately in try clause for early Mercurial
308 contrib/perf.py:\d+: (re)
343 contrib/perf.py:\d+: (re)
309 > origindexpath = orig.opener.join(orig.indexfile)
344 > origindexpath = orig.opener.join(orig.indexfile)
310 use getvfs()/getsvfs() for early Mercurial
345 use getvfs()/getsvfs() for early Mercurial
311 contrib/perf.py:\d+: (re)
346 contrib/perf.py:\d+: (re)
312 > origdatapath = orig.opener.join(orig.datafile)
347 > origdatapath = orig.opener.join(orig.datafile)
313 use getvfs()/getsvfs() for early Mercurial
348 use getvfs()/getsvfs() for early Mercurial
314 contrib/perf.py:\d+: (re)
349 contrib/perf.py:\d+: (re)
315 > vfs = vfsmod.vfs(tmpdir)
350 > vfs = vfsmod.vfs(tmpdir)
316 use getvfs()/getsvfs() for early Mercurial
351 use getvfs()/getsvfs() for early Mercurial
317 contrib/perf.py:\d+: (re)
352 contrib/perf.py:\d+: (re)
318 > vfs.options = getattr(orig.opener, 'options', None)
353 > vfs.options = getattr(orig.opener, 'options', None)
319 use getvfs()/getsvfs() for early Mercurial
354 use getvfs()/getsvfs() for early Mercurial
320 [1]
355 [1]