perf: add a way to benchmark `dirstate.status`...
marmoute
r43702:eabc5eec default draft
@@ -1,3821 +1,3837 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median, and average. If not set, only the best timing is reported
12 worst, median, and average. If not set, only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (Only the first iteration is profiled.)
23 (Only the first iteration is profiled.)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If the benchmark has been running for <time> seconds, and we have performed
30 If the benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark.
31 <numberofrun> iterations, stop the benchmark.
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
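For readers configuring the options documented in the docstring above, a minimal hgrc sketch (the values are illustrative examples, not recommendations):

    [perf]
    all-timing = yes
    presleep = 2
    pre-run = 3
    profile-benchmark = no
    run-limits = 5.0-50, 15.0-5
    stub = no

With `all-timing` enabled, the best, max, average, and median timings are reported. The `run-limits` value here stops a benchmark once it has run for 5 seconds and at least 50 iterations, or 15 seconds and at least 5 iterations, whichever condition is satisfied first.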
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
123 def identity(a):
123 def identity(a):
124 return a
124 return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
177 def safehasattr(thing, attr):
177 def safehasattr(thing, attr):
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
226 def parsealiases(cmd):
226 def parsealiases(cmd):
227 return cmd.split(b"|")
227 return cmd.split(b"|")
228
228
229
229
230 if safehasattr(registrar, 'command'):
230 if safehasattr(registrar, 'command'):
231 command = registrar.command(cmdtable)
231 command = registrar.command(cmdtable)
232 elif safehasattr(cmdutil, 'command'):
232 elif safehasattr(cmdutil, 'command'):
233 command = cmdutil.command(cmdtable)
233 command = cmdutil.command(cmdtable)
234 if b'norepo' not in getargspec(command).args:
234 if b'norepo' not in getargspec(command).args:
235 # for "historical portability":
235 # for "historical portability":
236 # wrap original cmdutil.command, because "norepo" option has
236 # wrap original cmdutil.command, because "norepo" option has
237 # been available since 3.1 (or 75a96326cecb)
237 # been available since 3.1 (or 75a96326cecb)
238 _command = command
238 _command = command
239
239
240 def command(name, options=(), synopsis=None, norepo=False):
240 def command(name, options=(), synopsis=None, norepo=False):
241 if norepo:
241 if norepo:
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 return _command(name, list(options), synopsis)
243 return _command(name, list(options), synopsis)
244
244
245
245
246 else:
246 else:
247 # for "historical portability":
247 # for "historical portability":
248 # define "@command" annotation locally, because cmdutil.command
248 # define "@command" annotation locally, because cmdutil.command
249 # has been available since 1.9 (or 2daa5179e73f)
249 # has been available since 1.9 (or 2daa5179e73f)
250 def command(name, options=(), synopsis=None, norepo=False):
250 def command(name, options=(), synopsis=None, norepo=False):
251 def decorator(func):
251 def decorator(func):
252 if synopsis:
252 if synopsis:
253 cmdtable[name] = func, list(options), synopsis
253 cmdtable[name] = func, list(options), synopsis
254 else:
254 else:
255 cmdtable[name] = func, list(options)
255 cmdtable[name] = func, list(options)
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return func
258 return func
259
259
260 return decorator
260 return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'profile-benchmark',
298 b'profile-benchmark',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 )
300 )
301 configitem(
301 configitem(
302 b'perf',
302 b'perf',
303 b'run-limits',
303 b'run-limits',
304 default=mercurial.configitems.dynamicdefault,
304 default=mercurial.configitems.dynamicdefault,
305 experimental=True,
305 experimental=True,
306 )
306 )
307 except (ImportError, AttributeError):
307 except (ImportError, AttributeError):
308 pass
308 pass
309 except TypeError:
309 except TypeError:
310 # compatibility fix for a11fd395e83f
310 # compatibility fix for a11fd395e83f
311 # hg version: 5.2
311 # hg version: 5.2
312 configitem(
312 configitem(
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 )
314 )
315 configitem(
315 configitem(
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 )
320 )
321 configitem(
321 configitem(
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 )
326 )
327 configitem(
327 configitem(
328 b'perf',
328 b'perf',
329 b'profile-benchmark',
329 b'profile-benchmark',
330 default=mercurial.configitems.dynamicdefault,
330 default=mercurial.configitems.dynamicdefault,
331 )
331 )
332 configitem(
332 configitem(
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 )
334 )
335
335
336
336
337 def getlen(ui):
337 def getlen(ui):
338 if ui.configbool(b"perf", b"stub", False):
338 if ui.configbool(b"perf", b"stub", False):
339 return lambda x: 1
339 return lambda x: 1
340 return len
340 return len
341
341
342
342
343 class noop(object):
343 class noop(object):
344 """dummy context manager"""
344 """dummy context manager"""
345
345
346 def __enter__(self):
346 def __enter__(self):
347 pass
347 pass
348
348
349 def __exit__(self, *args):
349 def __exit__(self, *args):
350 pass
350 pass
351
351
352
352
353 NOOPCTX = noop()
353 NOOPCTX = noop()
354
354
355
355
356 def gettimer(ui, opts=None):
356 def gettimer(ui, opts=None):
357 """return a timer function and formatter: (timer, formatter)
357 """return a timer function and formatter: (timer, formatter)
358
358
359 This function exists to gather the creation of formatter in a single
359 This function exists to gather the creation of formatter in a single
360 place instead of duplicating it in all performance commands."""
360 place instead of duplicating it in all performance commands."""
361
361
362 # enforce an idle period before execution to counteract power management
362 # enforce an idle period before execution to counteract power management
363 # experimental config: perf.presleep
363 # experimental config: perf.presleep
364 time.sleep(getint(ui, b"perf", b"presleep", 1))
364 time.sleep(getint(ui, b"perf", b"presleep", 1))
365
365
366 if opts is None:
366 if opts is None:
367 opts = {}
367 opts = {}
368 # redirect all to stderr unless buffer api is in use
368 # redirect all to stderr unless buffer api is in use
369 if not ui._buffers:
369 if not ui._buffers:
370 ui = ui.copy()
370 ui = ui.copy()
371 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
371 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
372 if uifout:
372 if uifout:
373 # for "historical portability":
373 # for "historical portability":
374 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
374 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
375 uifout.set(ui.ferr)
375 uifout.set(ui.ferr)
376
376
377 # get a formatter
377 # get a formatter
378 uiformatter = getattr(ui, 'formatter', None)
378 uiformatter = getattr(ui, 'formatter', None)
379 if uiformatter:
379 if uiformatter:
380 fm = uiformatter(b'perf', opts)
380 fm = uiformatter(b'perf', opts)
381 else:
381 else:
382 # for "historical portability":
382 # for "historical portability":
383 # define formatter locally, because ui.formatter has been
383 # define formatter locally, because ui.formatter has been
384 # available since 2.2 (or ae5f92e154d3)
384 # available since 2.2 (or ae5f92e154d3)
385 from mercurial import node
385 from mercurial import node
386
386
387 class defaultformatter(object):
387 class defaultformatter(object):
388 """Minimized composition of baseformatter and plainformatter
388 """Minimized composition of baseformatter and plainformatter
389 """
389 """
390
390
391 def __init__(self, ui, topic, opts):
391 def __init__(self, ui, topic, opts):
392 self._ui = ui
392 self._ui = ui
393 if ui.debugflag:
393 if ui.debugflag:
394 self.hexfunc = node.hex
394 self.hexfunc = node.hex
395 else:
395 else:
396 self.hexfunc = node.short
396 self.hexfunc = node.short
397
397
398 def __nonzero__(self):
398 def __nonzero__(self):
399 return False
399 return False
400
400
401 __bool__ = __nonzero__
401 __bool__ = __nonzero__
402
402
403 def startitem(self):
403 def startitem(self):
404 pass
404 pass
405
405
406 def data(self, **data):
406 def data(self, **data):
407 pass
407 pass
408
408
409 def write(self, fields, deftext, *fielddata, **opts):
409 def write(self, fields, deftext, *fielddata, **opts):
410 self._ui.write(deftext % fielddata, **opts)
410 self._ui.write(deftext % fielddata, **opts)
411
411
412 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
412 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
413 if cond:
413 if cond:
414 self._ui.write(deftext % fielddata, **opts)
414 self._ui.write(deftext % fielddata, **opts)
415
415
416 def plain(self, text, **opts):
416 def plain(self, text, **opts):
417 self._ui.write(text, **opts)
417 self._ui.write(text, **opts)
418
418
419 def end(self):
419 def end(self):
420 pass
420 pass
421
421
422 fm = defaultformatter(ui, b'perf', opts)
422 fm = defaultformatter(ui, b'perf', opts)
423
423
424 # stub function, runs code only once instead of in a loop
424 # stub function, runs code only once instead of in a loop
425 # experimental config: perf.stub
425 # experimental config: perf.stub
426 if ui.configbool(b"perf", b"stub", False):
426 if ui.configbool(b"perf", b"stub", False):
427 return functools.partial(stub_timer, fm), fm
427 return functools.partial(stub_timer, fm), fm
428
428
429 # experimental config: perf.all-timing
429 # experimental config: perf.all-timing
430 displayall = ui.configbool(b"perf", b"all-timing", False)
430 displayall = ui.configbool(b"perf", b"all-timing", False)
431
431
432 # experimental config: perf.run-limits
432 # experimental config: perf.run-limits
433 limitspec = ui.configlist(b"perf", b"run-limits", [])
433 limitspec = ui.configlist(b"perf", b"run-limits", [])
434 limits = []
434 limits = []
435 for item in limitspec:
435 for item in limitspec:
436 parts = item.split(b'-', 1)
436 parts = item.split(b'-', 1)
437 if len(parts) < 2:
437 if len(parts) < 2:
438 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
438 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
439 continue
439 continue
440 try:
440 try:
441 time_limit = float(_sysstr(parts[0]))
441 time_limit = float(_sysstr(parts[0]))
442 except ValueError as e:
442 except ValueError as e:
443 ui.warn(
443 ui.warn(
444 (
444 (
445 b'malformatted run limit entry, %s: %s\n'
445 b'malformatted run limit entry, %s: %s\n'
446 % (_bytestr(e), item)
446 % (_bytestr(e), item)
447 )
447 )
448 )
448 )
449 continue
449 continue
450 try:
450 try:
451 run_limit = int(_sysstr(parts[1]))
451 run_limit = int(_sysstr(parts[1]))
452 except ValueError as e:
452 except ValueError as e:
453 ui.warn(
453 ui.warn(
454 (
454 (
455 b'malformatted run limit entry, %s: %s\n'
455 b'malformatted run limit entry, %s: %s\n'
456 % (_bytestr(e), item)
456 % (_bytestr(e), item)
457 )
457 )
458 )
458 )
459 continue
459 continue
460 limits.append((time_limit, run_limit))
460 limits.append((time_limit, run_limit))
461 if not limits:
461 if not limits:
462 limits = DEFAULTLIMITS
462 limits = DEFAULTLIMITS
463
463
464 profiler = None
464 profiler = None
465 if profiling is not None:
465 if profiling is not None:
466 if ui.configbool(b"perf", b"profile-benchmark", False):
466 if ui.configbool(b"perf", b"profile-benchmark", False):
467 profiler = profiling.profile(ui)
467 profiler = profiling.profile(ui)
468
468
469 prerun = getint(ui, b"perf", b"pre-run", 0)
469 prerun = getint(ui, b"perf", b"pre-run", 0)
470 t = functools.partial(
470 t = functools.partial(
471 _timer,
471 _timer,
472 fm,
472 fm,
473 displayall=displayall,
473 displayall=displayall,
474 limits=limits,
474 limits=limits,
475 prerun=prerun,
475 prerun=prerun,
476 profiler=profiler,
476 profiler=profiler,
477 )
477 )
478 return t, fm
478 return t, fm
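To illustrate how the pieces above fit together, here is a hedged sketch of a hypothetical perf command (`perfexample` is not part of this file) registered through the `@command` decorator and timed via `gettimer`, following the same pattern as the real commands later in the file:

    @command(b'perfexample', formatteropts)
    def perfexample(ui, repo, **opts):
        """hypothetical benchmark: time len() of the changelog"""
        opts = _byteskwargs(opts)
        timer, fm = gettimer(ui, opts)

        def setup():
            # reset the changelog cache before every measured run
            clearchangelog(repo)

        def d():
            len(repo.changelog)

        timer(d, setup=setup)
        fm.end()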
479
479
480
480
481 def stub_timer(fm, func, setup=None, title=None):
481 def stub_timer(fm, func, setup=None, title=None):
482 if setup is not None:
482 if setup is not None:
483 setup()
483 setup()
484 func()
484 func()
485
485
486
486
487 @contextlib.contextmanager
487 @contextlib.contextmanager
488 def timeone():
488 def timeone():
489 r = []
489 r = []
490 ostart = os.times()
490 ostart = os.times()
491 cstart = util.timer()
491 cstart = util.timer()
492 yield r
492 yield r
493 cstop = util.timer()
493 cstop = util.timer()
494 ostop = os.times()
494 ostop = os.times()
495 a, b = ostart, ostop
495 a, b = ostart, ostop
496 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
496 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
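A small usage sketch of the `timeone()` context manager above: the yielded list is empty inside the block and receives a single `(wall, user, sys)` tuple once the block exits (the workload here is arbitrary):

    with timeone() as item:
        sum(range(1000000))  # arbitrary workload being measured
    wall, user_time, sys_time = item[0]  # populated after the block exits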
497
497
498
498
499 # list of stop condition (elapsed time, minimal run count)
499 # list of stop condition (elapsed time, minimal run count)
500 DEFAULTLIMITS = (
500 DEFAULTLIMITS = (
501 (3.0, 100),
501 (3.0, 100),
502 (10.0, 3),
502 (10.0, 3),
503 )
503 )
504
504
505
505
506 def _timer(
506 def _timer(
507 fm,
507 fm,
508 func,
508 func,
509 setup=None,
509 setup=None,
510 title=None,
510 title=None,
511 displayall=False,
511 displayall=False,
512 limits=DEFAULTLIMITS,
512 limits=DEFAULTLIMITS,
513 prerun=0,
513 prerun=0,
514 profiler=None,
514 profiler=None,
515 ):
515 ):
516 gc.collect()
516 gc.collect()
517 results = []
517 results = []
518 begin = util.timer()
518 begin = util.timer()
519 count = 0
519 count = 0
520 if profiler is None:
520 if profiler is None:
521 profiler = NOOPCTX
521 profiler = NOOPCTX
522 for i in range(prerun):
522 for i in range(prerun):
523 if setup is not None:
523 if setup is not None:
524 setup()
524 setup()
525 func()
525 func()
526 keepgoing = True
526 keepgoing = True
527 while keepgoing:
527 while keepgoing:
528 if setup is not None:
528 if setup is not None:
529 setup()
529 setup()
530 with profiler:
530 with profiler:
531 with timeone() as item:
531 with timeone() as item:
532 r = func()
532 r = func()
533 profiler = NOOPCTX
533 profiler = NOOPCTX
534 count += 1
534 count += 1
535 results.append(item[0])
535 results.append(item[0])
536 cstop = util.timer()
536 cstop = util.timer()
537 # Look for a stop condition.
537 # Look for a stop condition.
538 elapsed = cstop - begin
538 elapsed = cstop - begin
539 for t, mincount in limits:
539 for t, mincount in limits:
540 if elapsed >= t and count >= mincount:
540 if elapsed >= t and count >= mincount:
541 keepgoing = False
541 keepgoing = False
542 break
542 break
543
543
544 formatone(fm, results, title=title, result=r, displayall=displayall)
544 formatone(fm, results, title=title, result=r, displayall=displayall)
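To make the stop condition in `_timer` concrete, a standalone sketch of the same loop with hypothetical per-run timings (same `DEFAULTLIMITS` as above):

    limits = ((3.0, 100), (10.0, 3))  # DEFAULTLIMITS
    elapsed, count = 0.0, 0
    keepgoing = True
    while keepgoing:
        elapsed += 0.5  # pretend every run costs 0.5 second
        count += 1
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break
    # stops at count == 20, elapsed == 10.0: the (10.0, 3) limit is reached
    # long before a 0.5s/run benchmark could satisfy the (3.0, 100) limit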
545
545
546
546
547 def formatone(fm, timings, title=None, result=None, displayall=False):
547 def formatone(fm, timings, title=None, result=None, displayall=False):
548
548
549 count = len(timings)
549 count = len(timings)
550
550
551 fm.startitem()
551 fm.startitem()
552
552
553 if title:
553 if title:
554 fm.write(b'title', b'! %s\n', title)
554 fm.write(b'title', b'! %s\n', title)
555 if result:
555 if result:
556 fm.write(b'result', b'! result: %s\n', result)
556 fm.write(b'result', b'! result: %s\n', result)
557
557
558 def display(role, entry):
558 def display(role, entry):
559 prefix = b''
559 prefix = b''
560 if role != b'best':
560 if role != b'best':
561 prefix = b'%s.' % role
561 prefix = b'%s.' % role
562 fm.plain(b'!')
562 fm.plain(b'!')
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
565 fm.write(prefix + b'user', b' user %f', entry[1])
565 fm.write(prefix + b'user', b' user %f', entry[1])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
568 fm.plain(b'\n')
568 fm.plain(b'\n')
569
569
570 timings.sort()
570 timings.sort()
571 min_val = timings[0]
571 min_val = timings[0]
572 display(b'best', min_val)
572 display(b'best', min_val)
573 if displayall:
573 if displayall:
574 max_val = timings[-1]
574 max_val = timings[-1]
575 display(b'max', max_val)
575 display(b'max', max_val)
576 avg = tuple([sum(x) / count for x in zip(*timings)])
576 avg = tuple([sum(x) / count for x in zip(*timings)])
577 display(b'avg', avg)
577 display(b'avg', avg)
578 median = timings[len(timings) // 2]
578 median = timings[len(timings) // 2]
579 display(b'median', median)
579 display(b'median', median)
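Given the format strings in `display()` above, a result line looks roughly like the following (numbers are made up); with `perf.all-timing` set, similar lines ending in `(max of N)`, `(avg of N)`, and `(median of N)` follow the `best` line:

    ! wall 0.001234 comb 0.020000 user 0.010000 sys 0.010000 (best of 87)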
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
585 def getint(ui, section, name, default):
585 def getint(ui, section, name, default):
586 # for "historical portability":
586 # for "historical portability":
587 # ui.configint has been available since 1.9 (or fa2b596db182)
587 # ui.configint has been available since 1.9 (or fa2b596db182)
588 v = ui.config(section, name, None)
588 v = ui.config(section, name, None)
589 if v is None:
589 if v is None:
590 return default
590 return default
591 try:
591 try:
592 return int(v)
592 return int(v)
593 except ValueError:
593 except ValueError:
594 raise error.ConfigError(
594 raise error.ConfigError(
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
596 )
596 )
597
597
598
598
599 def safeattrsetter(obj, name, ignoremissing=False):
599 def safeattrsetter(obj, name, ignoremissing=False):
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
601
601
602 This function aborts if 'obj' doesn't have the 'name' attribute
602 This function aborts if 'obj' doesn't have the 'name' attribute
603 at runtime. This avoids overlooking future removal of an attribute,
603 at runtime. This avoids overlooking future removal of an attribute,
604 which would break assumptions of the performance measurement.
604 which would break assumptions of the performance measurement.
605
605
606 This function returns the object to (1) assign a new value, and
606 This function returns the object to (1) assign a new value, and
607 (2) restore an original value to the attribute.
607 (2) restore an original value to the attribute.
608
608
609 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
609 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
610 an abort, and this function returns None. This is useful to
610 an abort, and this function returns None. This is useful to
611 examine an attribute, which isn't ensured in all Mercurial
611 examine an attribute, which isn't ensured in all Mercurial
612 versions.
612 versions.
613 """
613 """
614 if not util.safehasattr(obj, name):
614 if not util.safehasattr(obj, name):
615 if ignoremissing:
615 if ignoremissing:
616 return None
616 return None
617 raise error.Abort(
617 raise error.Abort(
618 (
618 (
619 b"missing attribute %s of %s might break assumption"
619 b"missing attribute %s of %s might break assumption"
620 b" of performance measurement"
620 b" of performance measurement"
621 )
621 )
622 % (name, obj)
622 % (name, obj)
623 )
623 )
624
624
625 origvalue = getattr(obj, _sysstr(name))
625 origvalue = getattr(obj, _sysstr(name))
626
626
627 class attrutil(object):
627 class attrutil(object):
628 def set(self, newvalue):
628 def set(self, newvalue):
629 setattr(obj, _sysstr(name), newvalue)
629 setattr(obj, _sysstr(name), newvalue)
630
630
631 def restore(self):
631 def restore(self):
632 setattr(obj, _sysstr(name), origvalue)
632 setattr(obj, _sysstr(name), origvalue)
633
633
634 return attrutil()
634 return attrutil()
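A brief usage sketch of `safeattrsetter()` (mirroring how `gettimer` redirects output earlier in this file): the returned object can both assign a new value and restore the original one:

    uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
    if uifout:               # None when the attribute is missing and ignored
        uifout.set(ui.ferr)  # temporarily point fout at stderr
        # ... run the benchmark ...
        uifout.restore()     # put the original fout back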
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
640 def getbranchmapsubsettable():
640 def getbranchmapsubsettable():
641 # for "historical portability":
641 # for "historical portability":
642 # subsettable is defined in:
642 # subsettable is defined in:
643 # - branchmap since 2.9 (or 175c6fd8cacc)
643 # - branchmap since 2.9 (or 175c6fd8cacc)
644 # - repoview since 2.5 (or 59a9f18d4587)
644 # - repoview since 2.5 (or 59a9f18d4587)
645 # - repoviewutil since 5.0
645 # - repoviewutil since 5.0
646 for mod in (branchmap, repoview, repoviewutil):
646 for mod in (branchmap, repoview, repoviewutil):
647 subsettable = getattr(mod, 'subsettable', None)
647 subsettable = getattr(mod, 'subsettable', None)
648 if subsettable:
648 if subsettable:
649 return subsettable
649 return subsettable
650
650
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
652 # branchmap and repoview modules exist, but subsettable attribute
652 # branchmap and repoview modules exist, but subsettable attribute
653 # doesn't)
653 # doesn't)
654 raise error.Abort(
654 raise error.Abort(
655 b"perfbranchmap not available with this Mercurial",
655 b"perfbranchmap not available with this Mercurial",
656 hint=b"use 2.5 or later",
656 hint=b"use 2.5 or later",
657 )
657 )
658
658
659
659
660 def getsvfs(repo):
660 def getsvfs(repo):
661 """Return appropriate object to access files under .hg/store
661 """Return appropriate object to access files under .hg/store
662 """
662 """
663 # for "historical portability":
663 # for "historical portability":
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
665 svfs = getattr(repo, 'svfs', None)
665 svfs = getattr(repo, 'svfs', None)
666 if svfs:
666 if svfs:
667 return svfs
667 return svfs
668 else:
668 else:
669 return getattr(repo, 'sopener')
669 return getattr(repo, 'sopener')
670
670
671
671
672 def getvfs(repo):
672 def getvfs(repo):
673 """Return appropriate object to access files under .hg
673 """Return appropriate object to access files under .hg
674 """
674 """
675 # for "historical portability":
675 # for "historical portability":
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
677 vfs = getattr(repo, 'vfs', None)
677 vfs = getattr(repo, 'vfs', None)
678 if vfs:
678 if vfs:
679 return vfs
679 return vfs
680 else:
680 else:
681 return getattr(repo, 'opener')
681 return getattr(repo, 'opener')
682
682
683
683
684 def repocleartagscachefunc(repo):
684 def repocleartagscachefunc(repo):
685 """Return the function to clear tags cache according to repo internal API
685 """Return the function to clear tags cache according to repo internal API
686 """
686 """
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
689 # correct way to clear tags cache, because existing code paths
689 # correct way to clear tags cache, because existing code paths
690 # expect _tagscache to be a structured object.
690 # expect _tagscache to be a structured object.
691 def clearcache():
691 def clearcache():
692 # _tagscache has been filteredpropertycache since 2.5 (or
692 # _tagscache has been filteredpropertycache since 2.5 (or
693 # 98c867ac1330), and delattr() can't work in such case
693 # 98c867ac1330), and delattr() can't work in such case
694 if b'_tagscache' in vars(repo):
694 if b'_tagscache' in vars(repo):
695 del repo.__dict__[b'_tagscache']
695 del repo.__dict__[b'_tagscache']
696
696
697 return clearcache
697 return clearcache
698
698
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
700 if repotags: # since 1.4 (or 5614a628d173)
700 if repotags: # since 1.4 (or 5614a628d173)
701 return lambda: repotags.set(None)
701 return lambda: repotags.set(None)
702
702
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
705 return lambda: repotagscache.set(None)
705 return lambda: repotagscache.set(None)
706
706
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
708 # this point, but it isn't so problematic, because:
708 # this point, but it isn't so problematic, because:
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
710 # in perftags() causes failure soon
710 # in perftags() causes failure soon
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
712 raise error.Abort(b"tags API of this hg command is unknown")
712 raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
718 def clearfilecache(obj, attrname):
718 def clearfilecache(obj, attrname):
719 unfiltered = getattr(obj, 'unfiltered', None)
719 unfiltered = getattr(obj, 'unfiltered', None)
720 if unfiltered is not None:
720 if unfiltered is not None:
721 obj = obj.unfiltered()
721 obj = obj.unfiltered()
722 if attrname in vars(obj):
722 if attrname in vars(obj):
723 delattr(obj, attrname)
723 delattr(obj, attrname)
724 obj._filecache.pop(attrname, None)
724 obj._filecache.pop(attrname, None)
725
725
726
726
727 def clearchangelog(repo):
727 def clearchangelog(repo):
728 if repo is not repo.unfiltered():
728 if repo is not repo.unfiltered():
729 object.__setattr__(repo, r'_clcachekey', None)
729 object.__setattr__(repo, r'_clcachekey', None)
730 object.__setattr__(repo, r'_clcache', None)
730 object.__setattr__(repo, r'_clcache', None)
731 clearfilecache(repo.unfiltered(), 'changelog')
731 clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
737 @command(b'perfwalk', formatteropts)
737 @command(b'perfwalk', formatteropts)
738 def perfwalk(ui, repo, *pats, **opts):
738 def perfwalk(ui, repo, *pats, **opts):
739 opts = _byteskwargs(opts)
739 opts = _byteskwargs(opts)
740 timer, fm = gettimer(ui, opts)
740 timer, fm = gettimer(ui, opts)
741 m = scmutil.match(repo[None], pats, {})
741 m = scmutil.match(repo[None], pats, {})
742 timer(
742 timer(
743 lambda: len(
743 lambda: len(
744 list(
744 list(
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
746 )
746 )
747 )
747 )
748 )
748 )
749 fm.end()
749 fm.end()
750
750
751
751
752 @command(b'perfannotate', formatteropts)
752 @command(b'perfannotate', formatteropts)
753 def perfannotate(ui, repo, f, **opts):
753 def perfannotate(ui, repo, f, **opts):
754 opts = _byteskwargs(opts)
754 opts = _byteskwargs(opts)
755 timer, fm = gettimer(ui, opts)
755 timer, fm = gettimer(ui, opts)
756 fc = repo[b'.'][f]
756 fc = repo[b'.'][f]
757 timer(lambda: len(fc.annotate(True)))
757 timer(lambda: len(fc.annotate(True)))
758 fm.end()
758 fm.end()
759
759
760
760
761 @command(
761 @command(
762 b'perfstatus',
762 b'perfstatus',
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
763 [
764 (b'u', b'unknown', False, b'ask status to look for unknown files'),
765 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
766 ]
764 + formatteropts,
767 + formatteropts,
765 )
768 )
766 def perfstatus(ui, repo, **opts):
769 def perfstatus(ui, repo, **opts):
767 """benchmark the performance of a single status call
770 """benchmark the performance of a single status call
768
771
769 The repository data are preserved between each call.
772 The repository data are preserved between each call.
770
773
771 By default, only the status of the tracked files is requested. If
774 By default, only the status of the tracked files is requested. If
772 `--unknown` is passed, the "unknown" files are also tracked.
775 `--unknown` is passed, the "unknown" files are also tracked.
773 """
776 """
774 opts = _byteskwargs(opts)
777 opts = _byteskwargs(opts)
775 # m = match.always(repo.root, repo.getcwd())
778 # m = match.always(repo.root, repo.getcwd())
776 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
779 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
777 # False))))
780 # False))))
778 timer, fm = gettimer(ui, opts)
781 timer, fm = gettimer(ui, opts)
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
782 if opts[b'dirstate']:
783 dirstate = repo.dirstate
784 m = scmutil.matchall(repo)
785 unknown = opts[b'unknown']
786
787 def status_dirstate():
788 s = dirstate.status(
789 m, subrepos=[], ignored=False, clean=False, unknown=unknown
790 )
791 sum(map(len, s))
792
793 timer(status_dirstate)
794 else:
795 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
780 fm.end()
796 fm.end()
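Illustrative invocations of the command after this change (flag names as added above; the output shape depends on the perf settings in use):

    $ hg perfstatus               # time repo.status() on tracked files only
    $ hg perfstatus --unknown     # also ask status to look for unknown files
    $ hg perfstatus --dirstate    # time the internal dirstate.status() call directly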
781
797
782
798
783 @command(b'perfaddremove', formatteropts)
799 @command(b'perfaddremove', formatteropts)
784 def perfaddremove(ui, repo, **opts):
800 def perfaddremove(ui, repo, **opts):
785 opts = _byteskwargs(opts)
801 opts = _byteskwargs(opts)
786 timer, fm = gettimer(ui, opts)
802 timer, fm = gettimer(ui, opts)
787 try:
803 try:
788 oldquiet = repo.ui.quiet
804 oldquiet = repo.ui.quiet
789 repo.ui.quiet = True
805 repo.ui.quiet = True
790 matcher = scmutil.match(repo[None])
806 matcher = scmutil.match(repo[None])
791 opts[b'dry_run'] = True
807 opts[b'dry_run'] = True
792 if b'uipathfn' in getargspec(scmutil.addremove).args:
808 if b'uipathfn' in getargspec(scmutil.addremove).args:
793 uipathfn = scmutil.getuipathfn(repo)
809 uipathfn = scmutil.getuipathfn(repo)
794 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
810 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
795 else:
811 else:
796 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
812 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
797 finally:
813 finally:
798 repo.ui.quiet = oldquiet
814 repo.ui.quiet = oldquiet
799 fm.end()
815 fm.end()
800
816
801
817
802 def clearcaches(cl):
818 def clearcaches(cl):
803 # behave somewhat consistently across internal API changes
819 # behave somewhat consistently across internal API changes
804 if util.safehasattr(cl, b'clearcaches'):
820 if util.safehasattr(cl, b'clearcaches'):
805 cl.clearcaches()
821 cl.clearcaches()
806 elif util.safehasattr(cl, b'_nodecache'):
822 elif util.safehasattr(cl, b'_nodecache'):
807 from mercurial.node import nullid, nullrev
823 from mercurial.node import nullid, nullrev
808
824
809 cl._nodecache = {nullid: nullrev}
825 cl._nodecache = {nullid: nullrev}
810 cl._nodepos = None
826 cl._nodepos = None
811
827
812
828
813 @command(b'perfheads', formatteropts)
829 @command(b'perfheads', formatteropts)
814 def perfheads(ui, repo, **opts):
830 def perfheads(ui, repo, **opts):
815 """benchmark the computation of a changelog heads"""
831 """benchmark the computation of a changelog heads"""
816 opts = _byteskwargs(opts)
832 opts = _byteskwargs(opts)
817 timer, fm = gettimer(ui, opts)
833 timer, fm = gettimer(ui, opts)
818 cl = repo.changelog
834 cl = repo.changelog
819
835
820 def s():
836 def s():
821 clearcaches(cl)
837 clearcaches(cl)
822
838
823 def d():
839 def d():
824 len(cl.headrevs())
840 len(cl.headrevs())
825
841
826 timer(d, setup=s)
842 timer(d, setup=s)
827 fm.end()
843 fm.end()
828
844
829
845
830 @command(
846 @command(
831 b'perftags',
847 b'perftags',
832 formatteropts
848 formatteropts
833 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
849 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
834 )
850 )
835 def perftags(ui, repo, **opts):
851 def perftags(ui, repo, **opts):
836 opts = _byteskwargs(opts)
852 opts = _byteskwargs(opts)
837 timer, fm = gettimer(ui, opts)
853 timer, fm = gettimer(ui, opts)
838 repocleartagscache = repocleartagscachefunc(repo)
854 repocleartagscache = repocleartagscachefunc(repo)
839 clearrevlogs = opts[b'clear_revlogs']
855 clearrevlogs = opts[b'clear_revlogs']
840
856
841 def s():
857 def s():
842 if clearrevlogs:
858 if clearrevlogs:
843 clearchangelog(repo)
859 clearchangelog(repo)
844 clearfilecache(repo.unfiltered(), 'manifest')
860 clearfilecache(repo.unfiltered(), 'manifest')
845 repocleartagscache()
861 repocleartagscache()
846
862
847 def t():
863 def t():
848 return len(repo.tags())
864 return len(repo.tags())
849
865
850 timer(t, setup=s)
866 timer(t, setup=s)
851 fm.end()
867 fm.end()
852
868
853
869
854 @command(b'perfancestors', formatteropts)
870 @command(b'perfancestors', formatteropts)
855 def perfancestors(ui, repo, **opts):
871 def perfancestors(ui, repo, **opts):
856 opts = _byteskwargs(opts)
872 opts = _byteskwargs(opts)
857 timer, fm = gettimer(ui, opts)
873 timer, fm = gettimer(ui, opts)
858 heads = repo.changelog.headrevs()
874 heads = repo.changelog.headrevs()
859
875
860 def d():
876 def d():
861 for a in repo.changelog.ancestors(heads):
877 for a in repo.changelog.ancestors(heads):
862 pass
878 pass
863
879
864 timer(d)
880 timer(d)
865 fm.end()
881 fm.end()
866
882
867
883
868 @command(b'perfancestorset', formatteropts)
884 @command(b'perfancestorset', formatteropts)
869 def perfancestorset(ui, repo, revset, **opts):
885 def perfancestorset(ui, repo, revset, **opts):
870 opts = _byteskwargs(opts)
886 opts = _byteskwargs(opts)
871 timer, fm = gettimer(ui, opts)
887 timer, fm = gettimer(ui, opts)
872 revs = repo.revs(revset)
888 revs = repo.revs(revset)
873 heads = repo.changelog.headrevs()
889 heads = repo.changelog.headrevs()
874
890
875 def d():
891 def d():
876 s = repo.changelog.ancestors(heads)
892 s = repo.changelog.ancestors(heads)
877 for rev in revs:
893 for rev in revs:
878 rev in s
894 rev in s
879
895
880 timer(d)
896 timer(d)
881 fm.end()
897 fm.end()
882
898
883
899
884 @command(b'perfdiscovery', formatteropts, b'PATH')
900 @command(b'perfdiscovery', formatteropts, b'PATH')
885 def perfdiscovery(ui, repo, path, **opts):
901 def perfdiscovery(ui, repo, path, **opts):
886 """benchmark discovery between local repo and the peer at given path
902 """benchmark discovery between local repo and the peer at given path
887 """
903 """
888 repos = [repo, None]
904 repos = [repo, None]
889 timer, fm = gettimer(ui, opts)
905 timer, fm = gettimer(ui, opts)
890 path = ui.expandpath(path)
906 path = ui.expandpath(path)
891
907
892 def s():
908 def s():
893 repos[1] = hg.peer(ui, opts, path)
909 repos[1] = hg.peer(ui, opts, path)
894
910
895 def d():
911 def d():
896 setdiscovery.findcommonheads(ui, *repos)
912 setdiscovery.findcommonheads(ui, *repos)
897
913
898 timer(d, setup=s)
914 timer(d, setup=s)
899 fm.end()
915 fm.end()
900
916
901
917
902 @command(
918 @command(
903 b'perfbookmarks',
919 b'perfbookmarks',
904 formatteropts
920 formatteropts
905 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
921 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
906 )
922 )
907 def perfbookmarks(ui, repo, **opts):
923 def perfbookmarks(ui, repo, **opts):
908 """benchmark parsing bookmarks from disk to memory"""
924 """benchmark parsing bookmarks from disk to memory"""
909 opts = _byteskwargs(opts)
925 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
926 timer, fm = gettimer(ui, opts)
911
927
912 clearrevlogs = opts[b'clear_revlogs']
928 clearrevlogs = opts[b'clear_revlogs']
913
929
914 def s():
930 def s():
915 if clearrevlogs:
931 if clearrevlogs:
916 clearchangelog(repo)
932 clearchangelog(repo)
917 clearfilecache(repo, b'_bookmarks')
933 clearfilecache(repo, b'_bookmarks')
918
934
919 def d():
935 def d():
920 repo._bookmarks
936 repo._bookmarks
921
937
922 timer(d, setup=s)
938 timer(d, setup=s)
923 fm.end()
939 fm.end()
924
940
925
941
926 @command(b'perfbundleread', formatteropts, b'BUNDLE')
942 @command(b'perfbundleread', formatteropts, b'BUNDLE')
927 def perfbundleread(ui, repo, bundlepath, **opts):
943 def perfbundleread(ui, repo, bundlepath, **opts):
928 """Benchmark reading of bundle files.
944 """Benchmark reading of bundle files.
929
945
930 This command is meant to isolate the I/O part of bundle reading as
946 This command is meant to isolate the I/O part of bundle reading as
931 much as possible.
947 much as possible.
932 """
948 """
933 from mercurial import (
949 from mercurial import (
934 bundle2,
950 bundle2,
935 exchange,
951 exchange,
936 streamclone,
952 streamclone,
937 )
953 )
938
954
939 opts = _byteskwargs(opts)
955 opts = _byteskwargs(opts)
940
956
941 def makebench(fn):
957 def makebench(fn):
942 def run():
958 def run():
943 with open(bundlepath, b'rb') as fh:
959 with open(bundlepath, b'rb') as fh:
944 bundle = exchange.readbundle(ui, fh, bundlepath)
960 bundle = exchange.readbundle(ui, fh, bundlepath)
945 fn(bundle)
961 fn(bundle)
946
962
947 return run
963 return run
948
964
949 def makereadnbytes(size):
965 def makereadnbytes(size):
950 def run():
966 def run():
951 with open(bundlepath, b'rb') as fh:
967 with open(bundlepath, b'rb') as fh:
952 bundle = exchange.readbundle(ui, fh, bundlepath)
968 bundle = exchange.readbundle(ui, fh, bundlepath)
953 while bundle.read(size):
969 while bundle.read(size):
954 pass
970 pass
955
971
956 return run
972 return run
957
973
958 def makestdioread(size):
974 def makestdioread(size):
959 def run():
975 def run():
960 with open(bundlepath, b'rb') as fh:
976 with open(bundlepath, b'rb') as fh:
961 while fh.read(size):
977 while fh.read(size):
962 pass
978 pass
963
979
964 return run
980 return run
965
981
966 # bundle1
982 # bundle1
967
983
968 def deltaiter(bundle):
984 def deltaiter(bundle):
969 for delta in bundle.deltaiter():
985 for delta in bundle.deltaiter():
970 pass
986 pass
971
987
972 def iterchunks(bundle):
988 def iterchunks(bundle):
973 for chunk in bundle.getchunks():
989 for chunk in bundle.getchunks():
974 pass
990 pass
975
991
976 # bundle2
992 # bundle2
977
993
978 def forwardchunks(bundle):
994 def forwardchunks(bundle):
979 for chunk in bundle._forwardchunks():
995 for chunk in bundle._forwardchunks():
980 pass
996 pass
981
997
982 def iterparts(bundle):
998 def iterparts(bundle):
983 for part in bundle.iterparts():
999 for part in bundle.iterparts():
984 pass
1000 pass
985
1001
986 def iterpartsseekable(bundle):
1002 def iterpartsseekable(bundle):
987 for part in bundle.iterparts(seekable=True):
1003 for part in bundle.iterparts(seekable=True):
988 pass
1004 pass
989
1005
990 def seek(bundle):
1006 def seek(bundle):
991 for part in bundle.iterparts(seekable=True):
1007 for part in bundle.iterparts(seekable=True):
992 part.seek(0, os.SEEK_END)
1008 part.seek(0, os.SEEK_END)
993
1009
994 def makepartreadnbytes(size):
1010 def makepartreadnbytes(size):
995 def run():
1011 def run():
996 with open(bundlepath, b'rb') as fh:
1012 with open(bundlepath, b'rb') as fh:
997 bundle = exchange.readbundle(ui, fh, bundlepath)
1013 bundle = exchange.readbundle(ui, fh, bundlepath)
998 for part in bundle.iterparts():
1014 for part in bundle.iterparts():
999 while part.read(size):
1015 while part.read(size):
1000 pass
1016 pass
1001
1017
1002 return run
1018 return run
1003
1019
1004 benches = [
1020 benches = [
1005 (makestdioread(8192), b'read(8k)'),
1021 (makestdioread(8192), b'read(8k)'),
1006 (makestdioread(16384), b'read(16k)'),
1022 (makestdioread(16384), b'read(16k)'),
1007 (makestdioread(32768), b'read(32k)'),
1023 (makestdioread(32768), b'read(32k)'),
1008 (makestdioread(131072), b'read(128k)'),
1024 (makestdioread(131072), b'read(128k)'),
1009 ]
1025 ]
1010
1026
1011 with open(bundlepath, b'rb') as fh:
1027 with open(bundlepath, b'rb') as fh:
1012 bundle = exchange.readbundle(ui, fh, bundlepath)
1028 bundle = exchange.readbundle(ui, fh, bundlepath)
1013
1029
1014 if isinstance(bundle, changegroup.cg1unpacker):
1030 if isinstance(bundle, changegroup.cg1unpacker):
1015 benches.extend(
1031 benches.extend(
1016 [
1032 [
1017 (makebench(deltaiter), b'cg1 deltaiter()'),
1033 (makebench(deltaiter), b'cg1 deltaiter()'),
1018 (makebench(iterchunks), b'cg1 getchunks()'),
1034 (makebench(iterchunks), b'cg1 getchunks()'),
1019 (makereadnbytes(8192), b'cg1 read(8k)'),
1035 (makereadnbytes(8192), b'cg1 read(8k)'),
1020 (makereadnbytes(16384), b'cg1 read(16k)'),
1036 (makereadnbytes(16384), b'cg1 read(16k)'),
1021 (makereadnbytes(32768), b'cg1 read(32k)'),
1037 (makereadnbytes(32768), b'cg1 read(32k)'),
1022 (makereadnbytes(131072), b'cg1 read(128k)'),
1038 (makereadnbytes(131072), b'cg1 read(128k)'),
1023 ]
1039 ]
1024 )
1040 )
1025 elif isinstance(bundle, bundle2.unbundle20):
1041 elif isinstance(bundle, bundle2.unbundle20):
1026 benches.extend(
1042 benches.extend(
1027 [
1043 [
1028 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1044 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1029 (makebench(iterparts), b'bundle2 iterparts()'),
1045 (makebench(iterparts), b'bundle2 iterparts()'),
1030 (
1046 (
1031 makebench(iterpartsseekable),
1047 makebench(iterpartsseekable),
1032 b'bundle2 iterparts() seekable',
1048 b'bundle2 iterparts() seekable',
1033 ),
1049 ),
1034 (makebench(seek), b'bundle2 part seek()'),
1050 (makebench(seek), b'bundle2 part seek()'),
1035 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1051 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1036 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1052 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1037 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1053 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1038 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1054 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1039 ]
1055 ]
1040 )
1056 )
1041 elif isinstance(bundle, streamclone.streamcloneapplier):
1057 elif isinstance(bundle, streamclone.streamcloneapplier):
1042 raise error.Abort(b'stream clone bundles not supported')
1058 raise error.Abort(b'stream clone bundles not supported')
1043 else:
1059 else:
1044 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1060 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1045
1061
1046 for fn, title in benches:
1062 for fn, title in benches:
1047 timer, fm = gettimer(ui, opts)
1063 timer, fm = gettimer(ui, opts)
1048 timer(fn, title=title)
1064 timer(fn, title=title)
1049 fm.end()
1065 fm.end()
1050
1066
1051
1067
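# Illustrative sketch, not part of perf.py: the `make*` helpers above are
# closure factories -- each one captures its parameters (for example a read
# size) and returns a zero-argument callable that the timer can run
# repeatedly.  A standalone, stdlib-only version of the same pattern:
import timeit


def _example_makereader(path, size):
    """Return a no-argument callable that reads `path` in `size`-byte chunks."""

    def run():
        with open(path, 'rb') as fh:
            while fh.read(size):
                pass

    return run


def _example_read_benches(path):
    """Time the reader for the same chunk sizes as the benches above."""
    return {
        size: timeit.timeit(_example_makereader(path, size), number=3)
        for size in (8192, 16384, 32768, 131072)
    }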
1052 @command(
1068 @command(
1053 b'perfchangegroupchangelog',
1069 b'perfchangegroupchangelog',
1054 formatteropts
1070 formatteropts
1055 + [
1071 + [
1056 (b'', b'cgversion', b'02', b'changegroup version'),
1072 (b'', b'cgversion', b'02', b'changegroup version'),
1057 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1073 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1058 ],
1074 ],
1059 )
1075 )
1060 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1076 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1061 """Benchmark producing a changelog group for a changegroup.
1077 """Benchmark producing a changelog group for a changegroup.
1062
1078
1063 This measures the time spent processing the changelog during a
1079 This measures the time spent processing the changelog during a
1064 bundle operation. This occurs during `hg bundle` and on a server
1080 bundle operation. This occurs during `hg bundle` and on a server
1065 processing a `getbundle` wire protocol request (handles clones
1081 processing a `getbundle` wire protocol request (handles clones
1066 and pull requests).
1082 and pull requests).
1067
1083
1068 By default, all revisions are added to the changegroup.
1084 By default, all revisions are added to the changegroup.
1069 """
1085 """
1070 opts = _byteskwargs(opts)
1086 opts = _byteskwargs(opts)
1071 cl = repo.changelog
1087 cl = repo.changelog
1072 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1088 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1073 bundler = changegroup.getbundler(cgversion, repo)
1089 bundler = changegroup.getbundler(cgversion, repo)
1074
1090
1075 def d():
1091 def d():
1076 state, chunks = bundler._generatechangelog(cl, nodes)
1092 state, chunks = bundler._generatechangelog(cl, nodes)
1077 for chunk in chunks:
1093 for chunk in chunks:
1078 pass
1094 pass
1079
1095
1080 timer, fm = gettimer(ui, opts)
1096 timer, fm = gettimer(ui, opts)
1081
1097
1082 # Terminal printing can interfere with timing. So disable it.
1098 # Terminal printing can interfere with timing. So disable it.
1083 with ui.configoverride({(b'progress', b'disable'): True}):
1099 with ui.configoverride({(b'progress', b'disable'): True}):
1084 timer(d)
1100 timer(d)
1085
1101
1086 fm.end()
1102 fm.end()
1087
1103
1088
1104
1089 @command(b'perfdirs', formatteropts)
1105 @command(b'perfdirs', formatteropts)
1090 def perfdirs(ui, repo, **opts):
1106 def perfdirs(ui, repo, **opts):
1091 opts = _byteskwargs(opts)
1107 opts = _byteskwargs(opts)
1092 timer, fm = gettimer(ui, opts)
1108 timer, fm = gettimer(ui, opts)
1093 dirstate = repo.dirstate
1109 dirstate = repo.dirstate
1094 b'a' in dirstate
1110 b'a' in dirstate
1095
1111
1096 def d():
1112 def d():
1097 dirstate.hasdir(b'a')
1113 dirstate.hasdir(b'a')
1098 del dirstate._map._dirs
1114 del dirstate._map._dirs
1099
1115
1100 timer(d)
1116 timer(d)
1101 fm.end()
1117 fm.end()
1102
1118
1103
1119
1104 @command(
1120 @command(
1105 b'perfdirstate',
1121 b'perfdirstate',
1106 [
1122 [
1107 (
1123 (
1108 b'',
1124 b'',
1109 b'iteration',
1125 b'iteration',
1110 None,
1126 None,
1111 b'benchmark a full iteration for the dirstate',
1127 b'benchmark a full iteration for the dirstate',
1112 ),
1128 ),
1113 (
1129 (
1114 b'',
1130 b'',
1115 b'contains',
1131 b'contains',
1116 None,
1132 None,
1117 b'benchmark a large amount of `nf in dirstate` calls',
1133 b'benchmark a large amount of `nf in dirstate` calls',
1118 ),
1134 ),
1119 ]
1135 ]
1120 + formatteropts,
1136 + formatteropts,
1121 )
1137 )
1122 def perfdirstate(ui, repo, **opts):
1138 def perfdirstate(ui, repo, **opts):
1123 """benchmap the time of various distate operations
1139 """benchmap the time of various distate operations
1124
1140
1125 By default benchmark the time necessary to load a dirstate from scratch.
1141 By default benchmark the time necessary to load a dirstate from scratch.
1126 The dirstate is loaded to the point where a "contains" request can be
1142 The dirstate is loaded to the point where a "contains" request can be
1127 answered.
1143 answered.
1128 """
1144 """
1129 opts = _byteskwargs(opts)
1145 opts = _byteskwargs(opts)
1130 timer, fm = gettimer(ui, opts)
1146 timer, fm = gettimer(ui, opts)
1131 b"a" in repo.dirstate
1147 b"a" in repo.dirstate
1132
1148
1133 if opts[b'iteration'] and opts[b'contains']:
1149 if opts[b'iteration'] and opts[b'contains']:
1134 msg = b'only specify one of --iteration or --contains'
1150 msg = b'only specify one of --iteration or --contains'
1135 raise error.Abort(msg)
1151 raise error.Abort(msg)
1136
1152
1137 if opts[b'iteration']:
1153 if opts[b'iteration']:
1138 setup = None
1154 setup = None
1139 dirstate = repo.dirstate
1155 dirstate = repo.dirstate
1140
1156
1141 def d():
1157 def d():
1142 for f in dirstate:
1158 for f in dirstate:
1143 pass
1159 pass
1144
1160
1145 elif opts[b'contains']:
1161 elif opts[b'contains']:
1146 setup = None
1162 setup = None
1147 dirstate = repo.dirstate
1163 dirstate = repo.dirstate
1148 allfiles = list(dirstate)
1164 allfiles = list(dirstate)
1149 # also add file path that will be "missing" from the dirstate
1165 # also add file path that will be "missing" from the dirstate
1150 allfiles.extend([f[::-1] for f in allfiles])
1166 allfiles.extend([f[::-1] for f in allfiles])
1151
1167
1152 def d():
1168 def d():
1153 for f in allfiles:
1169 for f in allfiles:
1154 f in dirstate
1170 f in dirstate
1155
1171
1156 else:
1172 else:
1157
1173
1158 def setup():
1174 def setup():
1159 repo.dirstate.invalidate()
1175 repo.dirstate.invalidate()
1160
1176
1161 def d():
1177 def d():
1162 b"a" in repo.dirstate
1178 b"a" in repo.dirstate
1163
1179
1164 timer(d, setup=setup)
1180 timer(d, setup=setup)
1165 fm.end()
1181 fm.end()
1166
1182
1167
1183
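# Illustrative sketch, not part of perf.py: `timer(d, setup=setup)` above keeps
# the dirstate invalidation out of the measured time -- only `d` is timed, the
# `setup` callable resets state before each run.  The same split expressed with
# nothing but the stdlib (the helper name is made up for this example):
import timeit


def _example_time_with_setup(reset, body, runs=3):
    """Call `reset` (untimed) before each timed call of `body`."""
    timings = []
    for _ in range(runs):
        reset()  # state reset, excluded from the measurement
        timings.append(timeit.timeit(body, number=1))
    return timings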
1168 @command(b'perfdirstatedirs', formatteropts)
1184 @command(b'perfdirstatedirs', formatteropts)
1169 def perfdirstatedirs(ui, repo, **opts):
1185 def perfdirstatedirs(ui, repo, **opts):
1170 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
1186 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
1171 """
1187 """
1172 opts = _byteskwargs(opts)
1188 opts = _byteskwargs(opts)
1173 timer, fm = gettimer(ui, opts)
1189 timer, fm = gettimer(ui, opts)
1174 repo.dirstate.hasdir(b"a")
1190 repo.dirstate.hasdir(b"a")
1175
1191
1176 def setup():
1192 def setup():
1177 del repo.dirstate._map._dirs
1193 del repo.dirstate._map._dirs
1178
1194
1179 def d():
1195 def d():
1180 repo.dirstate.hasdir(b"a")
1196 repo.dirstate.hasdir(b"a")
1181
1197
1182 timer(d, setup=setup)
1198 timer(d, setup=setup)
1183 fm.end()
1199 fm.end()
1184
1200
1185
1201
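# Illustrative sketch, not part of perf.py: `del repo.dirstate._map._dirs`
# above works because `_dirs` is a cached property -- deleting the cached
# value forces it to be recomputed on the next access, which is exactly what
# this benchmark wants to measure.  The same idea with the stdlib
# `functools.cached_property` (Python 3.8+):
import functools


class _ExampleMap(object):
    def __init__(self, files):
        self._files = files

    @functools.cached_property
    def dirs(self):
        # expensive derived structure, rebuilt only when the cache is empty
        return {f.rsplit('/', 1)[0] for f in self._files if '/' in f}


# usage sketch:
#   m = _ExampleMap(['a/b', 'a/c', 'd'])
#   m.dirs        # computed and cached -> {'a'}
#   del m.dirs    # drop the cache; the next access recomputes it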
1186 @command(b'perfdirstatefoldmap', formatteropts)
1202 @command(b'perfdirstatefoldmap', formatteropts)
1187 def perfdirstatefoldmap(ui, repo, **opts):
1203 def perfdirstatefoldmap(ui, repo, **opts):
1188 """benchmap a `dirstate._map.filefoldmap.get()` request
1204 """benchmap a `dirstate._map.filefoldmap.get()` request
1189
1205
1190 The dirstate filefoldmap cache is dropped between every request.
1206 The dirstate filefoldmap cache is dropped between every request.
1191 """
1207 """
1192 opts = _byteskwargs(opts)
1208 opts = _byteskwargs(opts)
1193 timer, fm = gettimer(ui, opts)
1209 timer, fm = gettimer(ui, opts)
1194 dirstate = repo.dirstate
1210 dirstate = repo.dirstate
1195 dirstate._map.filefoldmap.get(b'a')
1211 dirstate._map.filefoldmap.get(b'a')
1196
1212
1197 def setup():
1213 def setup():
1198 del dirstate._map.filefoldmap
1214 del dirstate._map.filefoldmap
1199
1215
1200 def d():
1216 def d():
1201 dirstate._map.filefoldmap.get(b'a')
1217 dirstate._map.filefoldmap.get(b'a')
1202
1218
1203 timer(d, setup=setup)
1219 timer(d, setup=setup)
1204 fm.end()
1220 fm.end()
1205
1221
1206
1222
1207 @command(b'perfdirfoldmap', formatteropts)
1223 @command(b'perfdirfoldmap', formatteropts)
1208 def perfdirfoldmap(ui, repo, **opts):
1224 def perfdirfoldmap(ui, repo, **opts):
1209 """benchmap a `dirstate._map.dirfoldmap.get()` request
1225 """benchmap a `dirstate._map.dirfoldmap.get()` request
1210
1226
1211 The dirstate dirfoldmap cache is dropped between every request.
1227 The dirstate dirfoldmap cache is dropped between every request.
1212 """
1228 """
1213 opts = _byteskwargs(opts)
1229 opts = _byteskwargs(opts)
1214 timer, fm = gettimer(ui, opts)
1230 timer, fm = gettimer(ui, opts)
1215 dirstate = repo.dirstate
1231 dirstate = repo.dirstate
1216 dirstate._map.dirfoldmap.get(b'a')
1232 dirstate._map.dirfoldmap.get(b'a')
1217
1233
1218 def setup():
1234 def setup():
1219 del dirstate._map.dirfoldmap
1235 del dirstate._map.dirfoldmap
1220 del dirstate._map._dirs
1236 del dirstate._map._dirs
1221
1237
1222 def d():
1238 def d():
1223 dirstate._map.dirfoldmap.get(b'a')
1239 dirstate._map.dirfoldmap.get(b'a')
1224
1240
1225 timer(d, setup=setup)
1241 timer(d, setup=setup)
1226 fm.end()
1242 fm.end()
1227
1243
1228
1244
1229 @command(b'perfdirstatewrite', formatteropts)
1245 @command(b'perfdirstatewrite', formatteropts)
1230 def perfdirstatewrite(ui, repo, **opts):
1246 def perfdirstatewrite(ui, repo, **opts):
1231 """benchmap the time it take to write a dirstate on disk
1247 """benchmap the time it take to write a dirstate on disk
1232 """
1248 """
1233 opts = _byteskwargs(opts)
1249 opts = _byteskwargs(opts)
1234 timer, fm = gettimer(ui, opts)
1250 timer, fm = gettimer(ui, opts)
1235 ds = repo.dirstate
1251 ds = repo.dirstate
1236 b"a" in ds
1252 b"a" in ds
1237
1253
1238 def setup():
1254 def setup():
1239 ds._dirty = True
1255 ds._dirty = True
1240
1256
1241 def d():
1257 def d():
1242 ds.write(repo.currenttransaction())
1258 ds.write(repo.currenttransaction())
1243
1259
1244 timer(d, setup=setup)
1260 timer(d, setup=setup)
1245 fm.end()
1261 fm.end()
1246
1262
1247
1263
1248 def _getmergerevs(repo, opts):
1264 def _getmergerevs(repo, opts):
1249 """parse command argument to return rev involved in merge
1265 """parse command argument to return rev involved in merge
1250
1266
1251 input: options dictionnary with `rev`, `from` and `bse`
1267 input: options dictionnary with `rev`, `from` and `bse`
1252 output: (localctx, otherctx, basectx)
1268 output: (localctx, otherctx, basectx)
1253 """
1269 """
1254 if opts[b'from']:
1270 if opts[b'from']:
1255 fromrev = scmutil.revsingle(repo, opts[b'from'])
1271 fromrev = scmutil.revsingle(repo, opts[b'from'])
1256 wctx = repo[fromrev]
1272 wctx = repo[fromrev]
1257 else:
1273 else:
1258 wctx = repo[None]
1274 wctx = repo[None]
1259 # we don't want working dir files to be stat'd in the benchmark, so
1275 # we don't want working dir files to be stat'd in the benchmark, so
1260 # prime that cache
1276 # prime that cache
1261 wctx.dirty()
1277 wctx.dirty()
1262 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1278 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1263 if opts[b'base']:
1279 if opts[b'base']:
1264 fromrev = scmutil.revsingle(repo, opts[b'base'])
1280 fromrev = scmutil.revsingle(repo, opts[b'base'])
1265 ancestor = repo[fromrev]
1281 ancestor = repo[fromrev]
1266 else:
1282 else:
1267 ancestor = wctx.ancestor(rctx)
1283 ancestor = wctx.ancestor(rctx)
1268 return (wctx, rctx, ancestor)
1284 return (wctx, rctx, ancestor)
1269
1285
1270
1286
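# Illustrative note, not part of perf.py: _getmergerevs() expects the
# byte-keyed options parsed for the perfmerge* commands below, e.g. (values
# made up for illustration) {b'rev': b'.', b'from': b'', b'base': b''}.
# With empty `from`/`base` it falls back to the working copy context and the
# common ancestor computed by `wctx.ancestor(rctx)`.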
1271 @command(
1287 @command(
1272 b'perfmergecalculate',
1288 b'perfmergecalculate',
1273 [
1289 [
1274 (b'r', b'rev', b'.', b'rev to merge against'),
1290 (b'r', b'rev', b'.', b'rev to merge against'),
1275 (b'', b'from', b'', b'rev to merge from'),
1291 (b'', b'from', b'', b'rev to merge from'),
1276 (b'', b'base', b'', b'the revision to use as base'),
1292 (b'', b'base', b'', b'the revision to use as base'),
1277 ]
1293 ]
1278 + formatteropts,
1294 + formatteropts,
1279 )
1295 )
1280 def perfmergecalculate(ui, repo, **opts):
1296 def perfmergecalculate(ui, repo, **opts):
1281 opts = _byteskwargs(opts)
1297 opts = _byteskwargs(opts)
1282 timer, fm = gettimer(ui, opts)
1298 timer, fm = gettimer(ui, opts)
1283
1299
1284 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1300 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1285
1301
1286 def d():
1302 def d():
1287 # acceptremote is True because we don't want prompts in the middle of
1303 # acceptremote is True because we don't want prompts in the middle of
1288 # our benchmark
1304 # our benchmark
1289 merge.calculateupdates(
1305 merge.calculateupdates(
1290 repo,
1306 repo,
1291 wctx,
1307 wctx,
1292 rctx,
1308 rctx,
1293 [ancestor],
1309 [ancestor],
1294 branchmerge=False,
1310 branchmerge=False,
1295 force=False,
1311 force=False,
1296 acceptremote=True,
1312 acceptremote=True,
1297 followcopies=True,
1313 followcopies=True,
1298 )
1314 )
1299
1315
1300 timer(d)
1316 timer(d)
1301 fm.end()
1317 fm.end()
1302
1318
1303
1319
1304 @command(
1320 @command(
1305 b'perfmergecopies',
1321 b'perfmergecopies',
1306 [
1322 [
1307 (b'r', b'rev', b'.', b'rev to merge against'),
1323 (b'r', b'rev', b'.', b'rev to merge against'),
1308 (b'', b'from', b'', b'rev to merge from'),
1324 (b'', b'from', b'', b'rev to merge from'),
1309 (b'', b'base', b'', b'the revision to use as base'),
1325 (b'', b'base', b'', b'the revision to use as base'),
1310 ]
1326 ]
1311 + formatteropts,
1327 + formatteropts,
1312 )
1328 )
1313 def perfmergecopies(ui, repo, **opts):
1329 def perfmergecopies(ui, repo, **opts):
1314 """measure runtime of `copies.mergecopies`"""
1330 """measure runtime of `copies.mergecopies`"""
1315 opts = _byteskwargs(opts)
1331 opts = _byteskwargs(opts)
1316 timer, fm = gettimer(ui, opts)
1332 timer, fm = gettimer(ui, opts)
1317 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1333 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1318
1334
1319 def d():
1335 def d():
1320 # acceptremote is True because we don't want prompts in the middle of
1336 # acceptremote is True because we don't want prompts in the middle of
1321 # our benchmark
1337 # our benchmark
1322 copies.mergecopies(repo, wctx, rctx, ancestor)
1338 copies.mergecopies(repo, wctx, rctx, ancestor)
1323
1339
1324 timer(d)
1340 timer(d)
1325 fm.end()
1341 fm.end()
1326
1342
1327
1343
1328 @command(b'perfpathcopies', [], b"REV REV")
1344 @command(b'perfpathcopies', [], b"REV REV")
1329 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1345 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1330 """benchmark the copy tracing logic"""
1346 """benchmark the copy tracing logic"""
1331 opts = _byteskwargs(opts)
1347 opts = _byteskwargs(opts)
1332 timer, fm = gettimer(ui, opts)
1348 timer, fm = gettimer(ui, opts)
1333 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1349 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1334 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1350 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1335
1351
1336 def d():
1352 def d():
1337 copies.pathcopies(ctx1, ctx2)
1353 copies.pathcopies(ctx1, ctx2)
1338
1354
1339 timer(d)
1355 timer(d)
1340 fm.end()
1356 fm.end()
1341
1357
1342
1358
1343 @command(
1359 @command(
1344 b'perfphases',
1360 b'perfphases',
1345 [(b'', b'full', False, b'include file reading time too'),],
1361 [(b'', b'full', False, b'include file reading time too'),],
1346 b"",
1362 b"",
1347 )
1363 )
1348 def perfphases(ui, repo, **opts):
1364 def perfphases(ui, repo, **opts):
1349 """benchmark phasesets computation"""
1365 """benchmark phasesets computation"""
1350 opts = _byteskwargs(opts)
1366 opts = _byteskwargs(opts)
1351 timer, fm = gettimer(ui, opts)
1367 timer, fm = gettimer(ui, opts)
1352 _phases = repo._phasecache
1368 _phases = repo._phasecache
1353 full = opts.get(b'full')
1369 full = opts.get(b'full')
1354
1370
1355 def d():
1371 def d():
1356 phases = _phases
1372 phases = _phases
1357 if full:
1373 if full:
1358 clearfilecache(repo, b'_phasecache')
1374 clearfilecache(repo, b'_phasecache')
1359 phases = repo._phasecache
1375 phases = repo._phasecache
1360 phases.invalidate()
1376 phases.invalidate()
1361 phases.loadphaserevs(repo)
1377 phases.loadphaserevs(repo)
1362
1378
1363 timer(d)
1379 timer(d)
1364 fm.end()
1380 fm.end()
1365
1381
1366
1382
1367 @command(b'perfphasesremote', [], b"[DEST]")
1383 @command(b'perfphasesremote', [], b"[DEST]")
1368 def perfphasesremote(ui, repo, dest=None, **opts):
1384 def perfphasesremote(ui, repo, dest=None, **opts):
1369 """benchmark time needed to analyse phases of the remote server"""
1385 """benchmark time needed to analyse phases of the remote server"""
1370 from mercurial.node import bin
1386 from mercurial.node import bin
1371 from mercurial import (
1387 from mercurial import (
1372 exchange,
1388 exchange,
1373 hg,
1389 hg,
1374 phases,
1390 phases,
1375 )
1391 )
1376
1392
1377 opts = _byteskwargs(opts)
1393 opts = _byteskwargs(opts)
1378 timer, fm = gettimer(ui, opts)
1394 timer, fm = gettimer(ui, opts)
1379
1395
1380 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1396 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1381 if not path:
1397 if not path:
1382 raise error.Abort(
1398 raise error.Abort(
1383 b'default repository not configured!',
1399 b'default repository not configured!',
1384 hint=b"see 'hg help config.paths'",
1400 hint=b"see 'hg help config.paths'",
1385 )
1401 )
1386 dest = path.pushloc or path.loc
1402 dest = path.pushloc or path.loc
1387 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1403 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1388 other = hg.peer(repo, opts, dest)
1404 other = hg.peer(repo, opts, dest)
1389
1405
1390 # easier to perform discovery through the operation
1406 # easier to perform discovery through the operation
1391 op = exchange.pushoperation(repo, other)
1407 op = exchange.pushoperation(repo, other)
1392 exchange._pushdiscoverychangeset(op)
1408 exchange._pushdiscoverychangeset(op)
1393
1409
1394 remotesubset = op.fallbackheads
1410 remotesubset = op.fallbackheads
1395
1411
1396 with other.commandexecutor() as e:
1412 with other.commandexecutor() as e:
1397 remotephases = e.callcommand(
1413 remotephases = e.callcommand(
1398 b'listkeys', {b'namespace': b'phases'}
1414 b'listkeys', {b'namespace': b'phases'}
1399 ).result()
1415 ).result()
1400 del other
1416 del other
1401 publishing = remotephases.get(b'publishing', False)
1417 publishing = remotephases.get(b'publishing', False)
1402 if publishing:
1418 if publishing:
1403 ui.statusnoi18n(b'publishing: yes\n')
1419 ui.statusnoi18n(b'publishing: yes\n')
1404 else:
1420 else:
1405 ui.statusnoi18n(b'publishing: no\n')
1421 ui.statusnoi18n(b'publishing: no\n')
1406
1422
1407 nodemap = repo.changelog.nodemap
1423 nodemap = repo.changelog.nodemap
1408 nonpublishroots = 0
1424 nonpublishroots = 0
1409 for nhex, phase in remotephases.iteritems():
1425 for nhex, phase in remotephases.iteritems():
1410 if nhex == b'publishing': # ignore data related to publish option
1426 if nhex == b'publishing': # ignore data related to publish option
1411 continue
1427 continue
1412 node = bin(nhex)
1428 node = bin(nhex)
1413 if node in nodemap and int(phase):
1429 if node in nodemap and int(phase):
1414 nonpublishroots += 1
1430 nonpublishroots += 1
1415 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1431 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1416 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1432 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1417
1433
1418 def d():
1434 def d():
1419 phases.remotephasessummary(repo, remotesubset, remotephases)
1435 phases.remotephasessummary(repo, remotesubset, remotephases)
1420
1436
1421 timer(d)
1437 timer(d)
1422 fm.end()
1438 fm.end()
1423
1439
1424
1440
1425 @command(
1441 @command(
1426 b'perfmanifest',
1442 b'perfmanifest',
1427 [
1443 [
1428 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1444 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1429 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1445 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1430 ]
1446 ]
1431 + formatteropts,
1447 + formatteropts,
1432 b'REV|NODE',
1448 b'REV|NODE',
1433 )
1449 )
1434 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1450 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1435 """benchmark the time to read a manifest from disk and return a usable
1451 """benchmark the time to read a manifest from disk and return a usable
1436 dict-like object
1452 dict-like object
1437
1453
1438 Manifest caches are cleared before retrieval."""
1454 Manifest caches are cleared before retrieval."""
1439 opts = _byteskwargs(opts)
1455 opts = _byteskwargs(opts)
1440 timer, fm = gettimer(ui, opts)
1456 timer, fm = gettimer(ui, opts)
1441 if not manifest_rev:
1457 if not manifest_rev:
1442 ctx = scmutil.revsingle(repo, rev, rev)
1458 ctx = scmutil.revsingle(repo, rev, rev)
1443 t = ctx.manifestnode()
1459 t = ctx.manifestnode()
1444 else:
1460 else:
1445 from mercurial.node import bin
1461 from mercurial.node import bin
1446
1462
1447 if len(rev) == 40:
1463 if len(rev) == 40:
1448 t = bin(rev)
1464 t = bin(rev)
1449 else:
1465 else:
1450 try:
1466 try:
1451 rev = int(rev)
1467 rev = int(rev)
1452
1468
1453 if util.safehasattr(repo.manifestlog, b'getstorage'):
1469 if util.safehasattr(repo.manifestlog, b'getstorage'):
1454 t = repo.manifestlog.getstorage(b'').node(rev)
1470 t = repo.manifestlog.getstorage(b'').node(rev)
1455 else:
1471 else:
1456 t = repo.manifestlog._revlog.lookup(rev)
1472 t = repo.manifestlog._revlog.lookup(rev)
1457 except ValueError:
1473 except ValueError:
1458 raise error.Abort(
1474 raise error.Abort(
1459 b'manifest revision must be integer or full node'
1475 b'manifest revision must be integer or full node'
1460 )
1476 )
1461
1477
1462 def d():
1478 def d():
1463 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1479 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1464 repo.manifestlog[t].read()
1480 repo.manifestlog[t].read()
1465
1481
1466 timer(d)
1482 timer(d)
1467 fm.end()
1483 fm.end()
1468
1484
1469
1485
1470 @command(b'perfchangeset', formatteropts)
1486 @command(b'perfchangeset', formatteropts)
1471 def perfchangeset(ui, repo, rev, **opts):
1487 def perfchangeset(ui, repo, rev, **opts):
1472 opts = _byteskwargs(opts)
1488 opts = _byteskwargs(opts)
1473 timer, fm = gettimer(ui, opts)
1489 timer, fm = gettimer(ui, opts)
1474 n = scmutil.revsingle(repo, rev).node()
1490 n = scmutil.revsingle(repo, rev).node()
1475
1491
1476 def d():
1492 def d():
1477 repo.changelog.read(n)
1493 repo.changelog.read(n)
1478 # repo.changelog._cache = None
1494 # repo.changelog._cache = None
1479
1495
1480 timer(d)
1496 timer(d)
1481 fm.end()
1497 fm.end()
1482
1498
1483
1499
1484 @command(b'perfignore', formatteropts)
1500 @command(b'perfignore', formatteropts)
1485 def perfignore(ui, repo, **opts):
1501 def perfignore(ui, repo, **opts):
1486 """benchmark operation related to computing ignore"""
1502 """benchmark operation related to computing ignore"""
1487 opts = _byteskwargs(opts)
1503 opts = _byteskwargs(opts)
1488 timer, fm = gettimer(ui, opts)
1504 timer, fm = gettimer(ui, opts)
1489 dirstate = repo.dirstate
1505 dirstate = repo.dirstate
1490
1506
1491 def setupone():
1507 def setupone():
1492 dirstate.invalidate()
1508 dirstate.invalidate()
1493 clearfilecache(dirstate, b'_ignore')
1509 clearfilecache(dirstate, b'_ignore')
1494
1510
1495 def runone():
1511 def runone():
1496 dirstate._ignore
1512 dirstate._ignore
1497
1513
1498 timer(runone, setup=setupone, title=b"load")
1514 timer(runone, setup=setupone, title=b"load")
1499 fm.end()
1515 fm.end()
1500
1516
1501
1517
1502 @command(
1518 @command(
1503 b'perfindex',
1519 b'perfindex',
1504 [
1520 [
1505 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1521 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1506 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1522 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1507 ]
1523 ]
1508 + formatteropts,
1524 + formatteropts,
1509 )
1525 )
1510 def perfindex(ui, repo, **opts):
1526 def perfindex(ui, repo, **opts):
1511 """benchmark index creation time followed by a lookup
1527 """benchmark index creation time followed by a lookup
1512
1528
1513 The default is to look `tip` up. Depending on the index implementation,
1529 The default is to look `tip` up. Depending on the index implementation,
1514 the revision looked up can matter. For example, an implementation
1530 the revision looked up can matter. For example, an implementation
1515 scanning the index will have a faster lookup time for `--rev tip` than for
1531 scanning the index will have a faster lookup time for `--rev tip` than for
1516 `--rev 0`. The number of looked up revisions and their order can also
1532 `--rev 0`. The number of looked up revisions and their order can also
1517 matter.
1533 matter.
1518
1534
1519 Examples of useful sets to test:
1535 Examples of useful sets to test:
1520 * tip
1536 * tip
1521 * 0
1537 * 0
1522 * -10:
1538 * -10:
1523 * :10
1539 * :10
1524 * -10: + :10
1540 * -10: + :10
1525 * :10: + -10:
1541 * :10: + -10:
1526 * -10000:
1542 * -10000:
1527 * -10000: + 0
1543 * -10000: + 0
1528
1544
1529 It is not currently possible to check for lookup of a missing node. For
1545 It is not currently possible to check for lookup of a missing node. For
1530 deeper lookup benchmarking, check out the `perfnodemap` command."""
1546 deeper lookup benchmarking, check out the `perfnodemap` command."""
1531 import mercurial.revlog
1547 import mercurial.revlog
1532
1548
1533 opts = _byteskwargs(opts)
1549 opts = _byteskwargs(opts)
1534 timer, fm = gettimer(ui, opts)
1550 timer, fm = gettimer(ui, opts)
1535 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1551 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1536 if opts[b'no_lookup']:
1552 if opts[b'no_lookup']:
1537 if opts['rev']:
1553 if opts['rev']:
1538 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1554 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1539 nodes = []
1555 nodes = []
1540 elif not opts[b'rev']:
1556 elif not opts[b'rev']:
1541 nodes = [repo[b"tip"].node()]
1557 nodes = [repo[b"tip"].node()]
1542 else:
1558 else:
1543 revs = scmutil.revrange(repo, opts[b'rev'])
1559 revs = scmutil.revrange(repo, opts[b'rev'])
1544 cl = repo.changelog
1560 cl = repo.changelog
1545 nodes = [cl.node(r) for r in revs]
1561 nodes = [cl.node(r) for r in revs]
1546
1562
1547 unfi = repo.unfiltered()
1563 unfi = repo.unfiltered()
1548 # find the filecache func directly
1564 # find the filecache func directly
1549 # This avoids polluting the benchmark with the filecache logic
1565 # This avoids polluting the benchmark with the filecache logic
1550 makecl = unfi.__class__.changelog.func
1566 makecl = unfi.__class__.changelog.func
1551
1567
1552 def setup():
1568 def setup():
1553 # probably not necessary, but for good measure
1569 # probably not necessary, but for good measure
1554 clearchangelog(unfi)
1570 clearchangelog(unfi)
1555
1571
1556 def d():
1572 def d():
1557 cl = makecl(unfi)
1573 cl = makecl(unfi)
1558 for n in nodes:
1574 for n in nodes:
1559 cl.rev(n)
1575 cl.rev(n)
1560
1576
1561 timer(d, setup=setup)
1577 timer(d, setup=setup)
1562 fm.end()
1578 fm.end()
1563
1579
1564
1580
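# Illustrative usage, not part of perf.py -- the revsets below come from the
# docstring above; `--rev` may be repeated since the option accumulates into a
# list:
#
#   $ hg perfindex
#   $ hg perfindex --rev 0
#   $ hg perfindex --rev '-10000:' --rev 0
#   $ hg perfindex --no-lookup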
1565 @command(
1581 @command(
1566 b'perfnodemap',
1582 b'perfnodemap',
1567 [
1583 [
1568 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1584 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1569 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1585 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1570 ]
1586 ]
1571 + formatteropts,
1587 + formatteropts,
1572 )
1588 )
1573 def perfnodemap(ui, repo, **opts):
1589 def perfnodemap(ui, repo, **opts):
1574 """benchmark the time necessary to look up revision from a cold nodemap
1590 """benchmark the time necessary to look up revision from a cold nodemap
1575
1591
1576 Depending on the implementation, the amount and order of revisions we look
1592 Depending on the implementation, the amount and order of revisions we look
1577 up can vary. Examples of useful sets to test:
1593 up can vary. Examples of useful sets to test:
1578 * tip
1594 * tip
1579 * 0
1595 * 0
1580 * -10:
1596 * -10:
1581 * :10
1597 * :10
1582 * -10: + :10
1598 * -10: + :10
1583 * :10: + -10:
1599 * :10: + -10:
1584 * -10000:
1600 * -10000:
1585 * -10000: + 0
1601 * -10000: + 0
1586
1602
1587 The command currently focuses on valid binary lookups. Benchmarking for
1603 The command currently focuses on valid binary lookups. Benchmarking for
1588 hexlookup, prefix lookup and missing lookup would also be valuable.
1604 hexlookup, prefix lookup and missing lookup would also be valuable.
1589 """
1605 """
1590 import mercurial.revlog
1606 import mercurial.revlog
1591
1607
1592 opts = _byteskwargs(opts)
1608 opts = _byteskwargs(opts)
1593 timer, fm = gettimer(ui, opts)
1609 timer, fm = gettimer(ui, opts)
1594 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1610 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1595
1611
1596 unfi = repo.unfiltered()
1612 unfi = repo.unfiltered()
1597 clearcaches = opts['clear_caches']
1613 clearcaches = opts['clear_caches']
1598 # find the filecache func directly
1614 # find the filecache func directly
1599 # This avoids polluting the benchmark with the filecache logic
1615 # This avoids polluting the benchmark with the filecache logic
1600 makecl = unfi.__class__.changelog.func
1616 makecl = unfi.__class__.changelog.func
1601 if not opts[b'rev']:
1617 if not opts[b'rev']:
1602 raise error.Abort('use --rev to specify revisions to look up')
1618 raise error.Abort('use --rev to specify revisions to look up')
1603 revs = scmutil.revrange(repo, opts[b'rev'])
1619 revs = scmutil.revrange(repo, opts[b'rev'])
1604 cl = repo.changelog
1620 cl = repo.changelog
1605 nodes = [cl.node(r) for r in revs]
1621 nodes = [cl.node(r) for r in revs]
1606
1622
1607 # use a list to pass reference to a nodemap from one closure to the next
1623 # use a list to pass reference to a nodemap from one closure to the next
1608 nodeget = [None]
1624 nodeget = [None]
1609
1625
1610 def setnodeget():
1626 def setnodeget():
1611 # probably not necessary, but for good measure
1627 # probably not necessary, but for good measure
1612 clearchangelog(unfi)
1628 clearchangelog(unfi)
1613 nodeget[0] = makecl(unfi).nodemap.get
1629 nodeget[0] = makecl(unfi).nodemap.get
1614
1630
1615 def d():
1631 def d():
1616 get = nodeget[0]
1632 get = nodeget[0]
1617 for n in nodes:
1633 for n in nodes:
1618 get(n)
1634 get(n)
1619
1635
1620 setup = None
1636 setup = None
1621 if clearcaches:
1637 if clearcaches:
1622
1638
1623 def setup():
1639 def setup():
1624 setnodeget()
1640 setnodeget()
1625
1641
1626 else:
1642 else:
1627 setnodeget()
1643 setnodeget()
1628 d() # prewarm the data structure
1644 d() # prewarm the data structure
1629 timer(d, setup=setup)
1645 timer(d, setup=setup)
1630 fm.end()
1646 fm.end()
1631
1647
1632
1648
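# Illustrative sketch, not part of perf.py: `nodeget = [None]` above is the
# usual trick for rebinding a value from an enclosing scope without `nonlocal`
# (which Python 2 does not have) -- the closures share the one-element list
# and mutate its single slot.  Standalone version:
def _example_shared_slot():
    slot = [None]

    def fill(value):
        slot[0] = value  # rebinding through the shared list

    def read():
        return slot[0]

    fill(42)
    return read()  # -> 42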
1633 @command(b'perfstartup', formatteropts)
1649 @command(b'perfstartup', formatteropts)
1634 def perfstartup(ui, repo, **opts):
1650 def perfstartup(ui, repo, **opts):
1635 opts = _byteskwargs(opts)
1651 opts = _byteskwargs(opts)
1636 timer, fm = gettimer(ui, opts)
1652 timer, fm = gettimer(ui, opts)
1637
1653
1638 def d():
1654 def d():
1639 if os.name != r'nt':
1655 if os.name != r'nt':
1640 os.system(
1656 os.system(
1641 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1657 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1642 )
1658 )
1643 else:
1659 else:
1644 os.environ[r'HGRCPATH'] = r' '
1660 os.environ[r'HGRCPATH'] = r' '
1645 os.system(r"%s version -q > NUL" % sys.argv[0])
1661 os.system(r"%s version -q > NUL" % sys.argv[0])
1646
1662
1647 timer(d)
1663 timer(d)
1648 fm.end()
1664 fm.end()
1649
1665
1650
1666
1651 @command(b'perfparents', formatteropts)
1667 @command(b'perfparents', formatteropts)
1652 def perfparents(ui, repo, **opts):
1668 def perfparents(ui, repo, **opts):
1653 """benchmark the time necessary to fetch one changeset's parents.
1669 """benchmark the time necessary to fetch one changeset's parents.
1654
1670
1655 The fetch is done using the `node identifier`, traversing all object layers
1671 The fetch is done using the `node identifier`, traversing all object layers
1656 from the repository object. The first N revisions will be used for this
1672 from the repository object. The first N revisions will be used for this
1657 benchmark. N is controlled by the ``perf.parentscount`` config option
1673 benchmark. N is controlled by the ``perf.parentscount`` config option
1658 (default: 1000).
1674 (default: 1000).
1659 """
1675 """
1660 opts = _byteskwargs(opts)
1676 opts = _byteskwargs(opts)
1661 timer, fm = gettimer(ui, opts)
1677 timer, fm = gettimer(ui, opts)
1662 # control the number of commits perfparents iterates over
1678 # control the number of commits perfparents iterates over
1663 # experimental config: perf.parentscount
1679 # experimental config: perf.parentscount
1664 count = getint(ui, b"perf", b"parentscount", 1000)
1680 count = getint(ui, b"perf", b"parentscount", 1000)
1665 if len(repo.changelog) < count:
1681 if len(repo.changelog) < count:
1666 raise error.Abort(b"repo needs %d commits for this test" % count)
1682 raise error.Abort(b"repo needs %d commits for this test" % count)
1667 repo = repo.unfiltered()
1683 repo = repo.unfiltered()
1668 nl = [repo.changelog.node(i) for i in _xrange(count)]
1684 nl = [repo.changelog.node(i) for i in _xrange(count)]
1669
1685
1670 def d():
1686 def d():
1671 for n in nl:
1687 for n in nl:
1672 repo.changelog.parents(n)
1688 repo.changelog.parents(n)
1673
1689
1674 timer(d)
1690 timer(d)
1675 fm.end()
1691 fm.end()
1676
1692
1677
1693
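# Illustrative configuration, not part of perf.py: the revision count used by
# `perfparents` comes from the experimental `perf.parentscount` option read
# above, e.g. in an hgrc (the value 250 is an arbitrary example):
#
#   [perf]
#   parentscount = 250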
1678 @command(b'perfctxfiles', formatteropts)
1694 @command(b'perfctxfiles', formatteropts)
1679 def perfctxfiles(ui, repo, x, **opts):
1695 def perfctxfiles(ui, repo, x, **opts):
1680 opts = _byteskwargs(opts)
1696 opts = _byteskwargs(opts)
1681 x = int(x)
1697 x = int(x)
1682 timer, fm = gettimer(ui, opts)
1698 timer, fm = gettimer(ui, opts)
1683
1699
1684 def d():
1700 def d():
1685 len(repo[x].files())
1701 len(repo[x].files())
1686
1702
1687 timer(d)
1703 timer(d)
1688 fm.end()
1704 fm.end()
1689
1705
1690
1706
1691 @command(b'perfrawfiles', formatteropts)
1707 @command(b'perfrawfiles', formatteropts)
1692 def perfrawfiles(ui, repo, x, **opts):
1708 def perfrawfiles(ui, repo, x, **opts):
1693 opts = _byteskwargs(opts)
1709 opts = _byteskwargs(opts)
1694 x = int(x)
1710 x = int(x)
1695 timer, fm = gettimer(ui, opts)
1711 timer, fm = gettimer(ui, opts)
1696 cl = repo.changelog
1712 cl = repo.changelog
1697
1713
1698 def d():
1714 def d():
1699 len(cl.read(x)[3])
1715 len(cl.read(x)[3])
1700
1716
1701 timer(d)
1717 timer(d)
1702 fm.end()
1718 fm.end()
1703
1719
1704
1720
1705 @command(b'perflookup', formatteropts)
1721 @command(b'perflookup', formatteropts)
1706 def perflookup(ui, repo, rev, **opts):
1722 def perflookup(ui, repo, rev, **opts):
1707 opts = _byteskwargs(opts)
1723 opts = _byteskwargs(opts)
1708 timer, fm = gettimer(ui, opts)
1724 timer, fm = gettimer(ui, opts)
1709 timer(lambda: len(repo.lookup(rev)))
1725 timer(lambda: len(repo.lookup(rev)))
1710 fm.end()
1726 fm.end()
1711
1727
1712
1728
1713 @command(
1729 @command(
1714 b'perflinelogedits',
1730 b'perflinelogedits',
1715 [
1731 [
1716 (b'n', b'edits', 10000, b'number of edits'),
1732 (b'n', b'edits', 10000, b'number of edits'),
1717 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1733 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1718 ],
1734 ],
1719 norepo=True,
1735 norepo=True,
1720 )
1736 )
1721 def perflinelogedits(ui, **opts):
1737 def perflinelogedits(ui, **opts):
1722 from mercurial import linelog
1738 from mercurial import linelog
1723
1739
1724 opts = _byteskwargs(opts)
1740 opts = _byteskwargs(opts)
1725
1741
1726 edits = opts[b'edits']
1742 edits = opts[b'edits']
1727 maxhunklines = opts[b'max_hunk_lines']
1743 maxhunklines = opts[b'max_hunk_lines']
1728
1744
1729 maxb1 = 100000
1745 maxb1 = 100000
1730 random.seed(0)
1746 random.seed(0)
1731 randint = random.randint
1747 randint = random.randint
1732 currentlines = 0
1748 currentlines = 0
1733 arglist = []
1749 arglist = []
1734 for rev in _xrange(edits):
1750 for rev in _xrange(edits):
1735 a1 = randint(0, currentlines)
1751 a1 = randint(0, currentlines)
1736 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1752 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1737 b1 = randint(0, maxb1)
1753 b1 = randint(0, maxb1)
1738 b2 = randint(b1, b1 + maxhunklines)
1754 b2 = randint(b1, b1 + maxhunklines)
1739 currentlines += (b2 - b1) - (a2 - a1)
1755 currentlines += (b2 - b1) - (a2 - a1)
1740 arglist.append((rev, a1, a2, b1, b2))
1756 arglist.append((rev, a1, a2, b1, b2))
1741
1757
1742 def d():
1758 def d():
1743 ll = linelog.linelog()
1759 ll = linelog.linelog()
1744 for args in arglist:
1760 for args in arglist:
1745 ll.replacelines(*args)
1761 ll.replacelines(*args)
1746
1762
1747 timer, fm = gettimer(ui, opts)
1763 timer, fm = gettimer(ui, opts)
1748 timer(d)
1764 timer(d)
1749 fm.end()
1765 fm.end()
1750
1766
1751
1767
1752 @command(b'perfrevrange', formatteropts)
1768 @command(b'perfrevrange', formatteropts)
1753 def perfrevrange(ui, repo, *specs, **opts):
1769 def perfrevrange(ui, repo, *specs, **opts):
1754 opts = _byteskwargs(opts)
1770 opts = _byteskwargs(opts)
1755 timer, fm = gettimer(ui, opts)
1771 timer, fm = gettimer(ui, opts)
1756 revrange = scmutil.revrange
1772 revrange = scmutil.revrange
1757 timer(lambda: len(revrange(repo, specs)))
1773 timer(lambda: len(revrange(repo, specs)))
1758 fm.end()
1774 fm.end()
1759
1775
1760
1776
1761 @command(b'perfnodelookup', formatteropts)
1777 @command(b'perfnodelookup', formatteropts)
1762 def perfnodelookup(ui, repo, rev, **opts):
1778 def perfnodelookup(ui, repo, rev, **opts):
1763 opts = _byteskwargs(opts)
1779 opts = _byteskwargs(opts)
1764 timer, fm = gettimer(ui, opts)
1780 timer, fm = gettimer(ui, opts)
1765 import mercurial.revlog
1781 import mercurial.revlog
1766
1782
1767 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1783 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1768 n = scmutil.revsingle(repo, rev).node()
1784 n = scmutil.revsingle(repo, rev).node()
1769 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1785 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1770
1786
1771 def d():
1787 def d():
1772 cl.rev(n)
1788 cl.rev(n)
1773 clearcaches(cl)
1789 clearcaches(cl)
1774
1790
1775 timer(d)
1791 timer(d)
1776 fm.end()
1792 fm.end()
1777
1793
1778
1794
1779 @command(
1795 @command(
1780 b'perflog',
1796 b'perflog',
1781 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1797 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1782 )
1798 )
1783 def perflog(ui, repo, rev=None, **opts):
1799 def perflog(ui, repo, rev=None, **opts):
1784 opts = _byteskwargs(opts)
1800 opts = _byteskwargs(opts)
1785 if rev is None:
1801 if rev is None:
1786 rev = []
1802 rev = []
1787 timer, fm = gettimer(ui, opts)
1803 timer, fm = gettimer(ui, opts)
1788 ui.pushbuffer()
1804 ui.pushbuffer()
1789 timer(
1805 timer(
1790 lambda: commands.log(
1806 lambda: commands.log(
1791 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1807 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1792 )
1808 )
1793 )
1809 )
1794 ui.popbuffer()
1810 ui.popbuffer()
1795 fm.end()
1811 fm.end()
1796
1812
1797
1813
1798 @command(b'perfmoonwalk', formatteropts)
1814 @command(b'perfmoonwalk', formatteropts)
1799 def perfmoonwalk(ui, repo, **opts):
1815 def perfmoonwalk(ui, repo, **opts):
1800 """benchmark walking the changelog backwards
1816 """benchmark walking the changelog backwards
1801
1817
1802 This also loads the changelog data for each revision in the changelog.
1818 This also loads the changelog data for each revision in the changelog.
1803 """
1819 """
1804 opts = _byteskwargs(opts)
1820 opts = _byteskwargs(opts)
1805 timer, fm = gettimer(ui, opts)
1821 timer, fm = gettimer(ui, opts)
1806
1822
1807 def moonwalk():
1823 def moonwalk():
1808 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1824 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1809 ctx = repo[i]
1825 ctx = repo[i]
1810 ctx.branch() # read changelog data (in addition to the index)
1826 ctx.branch() # read changelog data (in addition to the index)
1811
1827
1812 timer(moonwalk)
1828 timer(moonwalk)
1813 fm.end()
1829 fm.end()
1814
1830
1815
1831
1816 @command(
1832 @command(
1817 b'perftemplating',
1833 b'perftemplating',
1818 [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
1834 [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
1819 )
1835 )
1820 def perftemplating(ui, repo, testedtemplate=None, **opts):
1836 def perftemplating(ui, repo, testedtemplate=None, **opts):
1821 """test the rendering time of a given template"""
1837 """test the rendering time of a given template"""
1822 if makelogtemplater is None:
1838 if makelogtemplater is None:
1823 raise error.Abort(
1839 raise error.Abort(
1824 b"perftemplating not available with this Mercurial",
1840 b"perftemplating not available with this Mercurial",
1825 hint=b"use 4.3 or later",
1841 hint=b"use 4.3 or later",
1826 )
1842 )
1827
1843
1828 opts = _byteskwargs(opts)
1844 opts = _byteskwargs(opts)
1829
1845
1830 nullui = ui.copy()
1846 nullui = ui.copy()
1831 nullui.fout = open(os.devnull, r'wb')
1847 nullui.fout = open(os.devnull, r'wb')
1832 nullui.disablepager()
1848 nullui.disablepager()
1833 revs = opts.get(b'rev')
1849 revs = opts.get(b'rev')
1834 if not revs:
1850 if not revs:
1835 revs = [b'all()']
1851 revs = [b'all()']
1836 revs = list(scmutil.revrange(repo, revs))
1852 revs = list(scmutil.revrange(repo, revs))
1837
1853
1838 defaulttemplate = (
1854 defaulttemplate = (
1839 b'{date|shortdate} [{rev}:{node|short}]'
1855 b'{date|shortdate} [{rev}:{node|short}]'
1840 b' {author|person}: {desc|firstline}\n'
1856 b' {author|person}: {desc|firstline}\n'
1841 )
1857 )
1842 if testedtemplate is None:
1858 if testedtemplate is None:
1843 testedtemplate = defaulttemplate
1859 testedtemplate = defaulttemplate
1844 displayer = makelogtemplater(nullui, repo, testedtemplate)
1860 displayer = makelogtemplater(nullui, repo, testedtemplate)
1845
1861
1846 def format():
1862 def format():
1847 for r in revs:
1863 for r in revs:
1848 ctx = repo[r]
1864 ctx = repo[r]
1849 displayer.show(ctx)
1865 displayer.show(ctx)
1850 displayer.flush(ctx)
1866 displayer.flush(ctx)
1851
1867
1852 timer, fm = gettimer(ui, opts)
1868 timer, fm = gettimer(ui, opts)
1853 timer(format)
1869 timer(format)
1854 fm.end()
1870 fm.end()
1855
1871
1856
1872
1857 def _displaystats(ui, opts, entries, data):
1873 def _displaystats(ui, opts, entries, data):
1859 # use a second formatter because the data are quite different, not sure
1875 # use a second formatter because the data are quite different, not sure
1860 # how it flies with the templater.
1876 # how it flies with the templater.
1861 fm = ui.formatter(b'perf-stats', opts)
1877 fm = ui.formatter(b'perf-stats', opts)
1862 for key, title in entries:
1878 for key, title in entries:
1863 values = data[key]
1879 values = data[key]
1864 nbvalues = len(values)
1880 nbvalues = len(values)
1865 values.sort()
1881 values.sort()
1866 stats = {
1882 stats = {
1867 'key': key,
1883 'key': key,
1868 'title': title,
1884 'title': title,
1869 'nbitems': len(values),
1885 'nbitems': len(values),
1870 'min': values[0][0],
1886 'min': values[0][0],
1871 '10%': values[(nbvalues * 10) // 100][0],
1887 '10%': values[(nbvalues * 10) // 100][0],
1872 '25%': values[(nbvalues * 25) // 100][0],
1888 '25%': values[(nbvalues * 25) // 100][0],
1873 '50%': values[(nbvalues * 50) // 100][0],
1889 '50%': values[(nbvalues * 50) // 100][0],
1874 '75%': values[(nbvalues * 75) // 100][0],
1890 '75%': values[(nbvalues * 75) // 100][0],
1875 '80%': values[(nbvalues * 80) // 100][0],
1891 '80%': values[(nbvalues * 80) // 100][0],
1876 '85%': values[(nbvalues * 85) // 100][0],
1892 '85%': values[(nbvalues * 85) // 100][0],
1877 '90%': values[(nbvalues * 90) // 100][0],
1893 '90%': values[(nbvalues * 90) // 100][0],
1878 '95%': values[(nbvalues * 95) // 100][0],
1894 '95%': values[(nbvalues * 95) // 100][0],
1879 '99%': values[(nbvalues * 99) // 100][0],
1895 '99%': values[(nbvalues * 99) // 100][0],
1880 'max': values[-1][0],
1896 'max': values[-1][0],
1881 }
1897 }
1882 fm.startitem()
1898 fm.startitem()
1883 fm.data(**stats)
1899 fm.data(**stats)
1884 # make node pretty for the human output
1900 # make node pretty for the human output
1885 fm.plain('### %s (%d items)\n' % (title, len(values)))
1901 fm.plain('### %s (%d items)\n' % (title, len(values)))
1886 lines = [
1902 lines = [
1887 'min',
1903 'min',
1888 '10%',
1904 '10%',
1889 '25%',
1905 '25%',
1890 '50%',
1906 '50%',
1891 '75%',
1907 '75%',
1892 '80%',
1908 '80%',
1893 '85%',
1909 '85%',
1894 '90%',
1910 '90%',
1895 '95%',
1911 '95%',
1896 '99%',
1912 '99%',
1897 'max',
1913 'max',
1898 ]
1914 ]
1899 for l in lines:
1915 for l in lines:
1900 fm.plain('%s: %s\n' % (l, stats[l]))
1916 fm.plain('%s: %s\n' % (l, stats[l]))
1901 fm.end()
1917 fm.end()
1902
1918
1903
1919
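# Illustrative sketch, not part of perf.py: the stats above pick percentiles
# by indexing the sorted list at `(len(values) * pct) // 100`, a nearest-rank
# style cut rather than an interpolated percentile.  Standalone version of the
# same computation (assumes a non-empty input list):
def _example_percentiles(values, percents=(10, 25, 50, 75, 90, 95, 99)):
    """Return {percent: value} using the same integer-index scheme as above."""
    ordered = sorted(values)
    nbvalues = len(ordered)
    return {p: ordered[(nbvalues * p) // 100] for p in percents}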
1904 @command(
1920 @command(
1905 b'perfhelper-mergecopies',
1921 b'perfhelper-mergecopies',
1906 formatteropts
1922 formatteropts
1907 + [
1923 + [
1908 (b'r', b'revs', [], b'restrict search to these revisions'),
1924 (b'r', b'revs', [], b'restrict search to these revisions'),
1909 (b'', b'timing', False, b'provides extra data (costly)'),
1925 (b'', b'timing', False, b'provides extra data (costly)'),
1910 (b'', b'stats', False, b'provides statistic about the measured data'),
1926 (b'', b'stats', False, b'provides statistic about the measured data'),
1911 ],
1927 ],
1912 )
1928 )
1913 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1929 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1914 """find statistics about potential parameters for `perfmergecopies`
1930 """find statistics about potential parameters for `perfmergecopies`
1915
1931
1916 This command finds (base, p1, p2) triplets relevant for copytracing
1932 This command finds (base, p1, p2) triplets relevant for copytracing
1917 benchmarking in the context of a merge. It reports values for some of the
1933 benchmarking in the context of a merge. It reports values for some of the
1918 parameters that impact merge copy tracing time during merge.
1934 parameters that impact merge copy tracing time during merge.
1919
1935
1920 If `--timing` is set, rename detection is run and the associated timing
1936 If `--timing` is set, rename detection is run and the associated timing
1921 will be reported. The extra details come at the cost of slower command
1937 will be reported. The extra details come at the cost of slower command
1922 execution.
1938 execution.
1923
1939
1924 Since rename detection is only run once, other factors might easily
1940 Since rename detection is only run once, other factors might easily
1925 affect the precision of the timing. However it should give a good
1941 affect the precision of the timing. However it should give a good
1926 approximation of which revision triplets are very costly.
1942 approximation of which revision triplets are very costly.
1927 """
1943 """
1928 opts = _byteskwargs(opts)
1944 opts = _byteskwargs(opts)
1929 fm = ui.formatter(b'perf', opts)
1945 fm = ui.formatter(b'perf', opts)
1930 dotiming = opts[b'timing']
1946 dotiming = opts[b'timing']
1931 dostats = opts[b'stats']
1947 dostats = opts[b'stats']
1932
1948
1933 output_template = [
1949 output_template = [
1934 ("base", "%(base)12s"),
1950 ("base", "%(base)12s"),
1935 ("p1", "%(p1.node)12s"),
1951 ("p1", "%(p1.node)12s"),
1936 ("p2", "%(p2.node)12s"),
1952 ("p2", "%(p2.node)12s"),
1937 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1953 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1938 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1954 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1939 ("p1.renames", "%(p1.renamedfiles)12d"),
1955 ("p1.renames", "%(p1.renamedfiles)12d"),
1940 ("p1.time", "%(p1.time)12.3f"),
1956 ("p1.time", "%(p1.time)12.3f"),
1941 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1957 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1942 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1958 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1943 ("p2.renames", "%(p2.renamedfiles)12d"),
1959 ("p2.renames", "%(p2.renamedfiles)12d"),
1944 ("p2.time", "%(p2.time)12.3f"),
1960 ("p2.time", "%(p2.time)12.3f"),
1945 ("renames", "%(nbrenamedfiles)12d"),
1961 ("renames", "%(nbrenamedfiles)12d"),
1946 ("total.time", "%(time)12.3f"),
1962 ("total.time", "%(time)12.3f"),
1947 ]
1963 ]
1948 if not dotiming:
1964 if not dotiming:
1949 output_template = [
1965 output_template = [
1950 i
1966 i
1951 for i in output_template
1967 for i in output_template
1952 if not ('time' in i[0] or 'renames' in i[0])
1968 if not ('time' in i[0] or 'renames' in i[0])
1953 ]
1969 ]
1954 header_names = [h for (h, v) in output_template]
1970 header_names = [h for (h, v) in output_template]
1955 output = ' '.join([v for (h, v) in output_template]) + '\n'
1971 output = ' '.join([v for (h, v) in output_template]) + '\n'
1956 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1972 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1957 fm.plain(header % tuple(header_names))
1973 fm.plain(header % tuple(header_names))
1958
1974
1959 if not revs:
1975 if not revs:
1960 revs = ['all()']
1976 revs = ['all()']
1961 revs = scmutil.revrange(repo, revs)
1977 revs = scmutil.revrange(repo, revs)
1962
1978
1963 if dostats:
1979 if dostats:
1964 alldata = {
1980 alldata = {
1965 'nbrevs': [],
1981 'nbrevs': [],
1966 'nbmissingfiles': [],
1982 'nbmissingfiles': [],
1967 }
1983 }
1968 if dotiming:
1984 if dotiming:
1969 alldata['parentnbrenames'] = []
1985 alldata['parentnbrenames'] = []
1970 alldata['totalnbrenames'] = []
1986 alldata['totalnbrenames'] = []
1971 alldata['parenttime'] = []
1987 alldata['parenttime'] = []
1972 alldata['totaltime'] = []
1988 alldata['totaltime'] = []
1973
1989
1974 roi = repo.revs('merge() and %ld', revs)
1990 roi = repo.revs('merge() and %ld', revs)
1975 for r in roi:
1991 for r in roi:
1976 ctx = repo[r]
1992 ctx = repo[r]
1977 p1 = ctx.p1()
1993 p1 = ctx.p1()
1978 p2 = ctx.p2()
1994 p2 = ctx.p2()
1979 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1995 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1980 for b in bases:
1996 for b in bases:
1981 b = repo[b]
1997 b = repo[b]
1982 p1missing = copies._computeforwardmissing(b, p1)
1998 p1missing = copies._computeforwardmissing(b, p1)
1983 p2missing = copies._computeforwardmissing(b, p2)
1999 p2missing = copies._computeforwardmissing(b, p2)
1984 data = {
2000 data = {
1985 b'base': b.hex(),
2001 b'base': b.hex(),
1986 b'p1.node': p1.hex(),
2002 b'p1.node': p1.hex(),
1987 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2003 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
1988 b'p1.nbmissingfiles': len(p1missing),
2004 b'p1.nbmissingfiles': len(p1missing),
1989 b'p2.node': p2.hex(),
2005 b'p2.node': p2.hex(),
1990 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2006 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
1991 b'p2.nbmissingfiles': len(p2missing),
2007 b'p2.nbmissingfiles': len(p2missing),
1992 }
2008 }
1993 if dostats:
2009 if dostats:
1994 if p1missing:
2010 if p1missing:
1995 alldata['nbrevs'].append(
2011 alldata['nbrevs'].append(
1996 (data['p1.nbrevs'], b.hex(), p1.hex())
2012 (data['p1.nbrevs'], b.hex(), p1.hex())
1997 )
2013 )
1998 alldata['nbmissingfiles'].append(
2014 alldata['nbmissingfiles'].append(
1999 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2015 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2000 )
2016 )
2001 if p2missing:
2017 if p2missing:
2002 alldata['nbrevs'].append(
2018 alldata['nbrevs'].append(
2003 (data['p2.nbrevs'], b.hex(), p2.hex())
2019 (data['p2.nbrevs'], b.hex(), p2.hex())
2004 )
2020 )
2005 alldata['nbmissingfiles'].append(
2021 alldata['nbmissingfiles'].append(
2006 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2022 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2007 )
2023 )
2008 if dotiming:
2024 if dotiming:
2009 begin = util.timer()
2025 begin = util.timer()
2010 mergedata = copies.mergecopies(repo, p1, p2, b)
2026 mergedata = copies.mergecopies(repo, p1, p2, b)
2011 end = util.timer()
2027 end = util.timer()
2012 # not very stable timing since we did only one run
2028 # not very stable timing since we did only one run
2013 data['time'] = end - begin
2029 data['time'] = end - begin
2014 # mergedata contains five dicts: "copy", "movewithdir",
2030 # mergedata contains five dicts: "copy", "movewithdir",
2015 # "diverge", "renamedelete" and "dirmove".
2031 # "diverge", "renamedelete" and "dirmove".
2016 # The first 4 are about renamed files, so let's count those.
2032 # The first 4 are about renamed files, so let's count those.
2017 renames = len(mergedata[0])
2033 renames = len(mergedata[0])
2018 renames += len(mergedata[1])
2034 renames += len(mergedata[1])
2019 renames += len(mergedata[2])
2035 renames += len(mergedata[2])
2020 renames += len(mergedata[3])
2036 renames += len(mergedata[3])
2021 data['nbrenamedfiles'] = renames
2037 data['nbrenamedfiles'] = renames
2022 begin = util.timer()
2038 begin = util.timer()
2023 p1renames = copies.pathcopies(b, p1)
2039 p1renames = copies.pathcopies(b, p1)
2024 end = util.timer()
2040 end = util.timer()
2025 data['p1.time'] = end - begin
2041 data['p1.time'] = end - begin
2026 begin = util.timer()
2042 begin = util.timer()
2027 p2renames = copies.pathcopies(b, p2)
2043 p2renames = copies.pathcopies(b, p2)
2028 end = util.timer()
2044 end = util.timer()
2029 data['p2.time'] = end - begin
2045 data['p2.time'] = end - begin
2030 data['p1.renamedfiles'] = len(p1renames)
2046 data['p1.renamedfiles'] = len(p1renames)
2031 data['p2.renamedfiles'] = len(p2renames)
2047 data['p2.renamedfiles'] = len(p2renames)
2032
2048
2033 if dostats:
2049 if dostats:
2034 if p1missing:
2050 if p1missing:
2035 alldata['parentnbrenames'].append(
2051 alldata['parentnbrenames'].append(
2036 (data['p1.renamedfiles'], b.hex(), p1.hex())
2052 (data['p1.renamedfiles'], b.hex(), p1.hex())
2037 )
2053 )
2038 alldata['parenttime'].append(
2054 alldata['parenttime'].append(
2039 (data['p1.time'], b.hex(), p1.hex())
2055 (data['p1.time'], b.hex(), p1.hex())
2040 )
2056 )
2041 if p2missing:
2057 if p2missing:
2042 alldata['parentnbrenames'].append(
2058 alldata['parentnbrenames'].append(
2043 (data['p2.renamedfiles'], b.hex(), p2.hex())
2059 (data['p2.renamedfiles'], b.hex(), p2.hex())
2044 )
2060 )
2045 alldata['parenttime'].append(
2061 alldata['parenttime'].append(
2046 (data['p2.time'], b.hex(), p2.hex())
2062 (data['p2.time'], b.hex(), p2.hex())
2047 )
2063 )
2048 if p1missing or p2missing:
2064 if p1missing or p2missing:
2049 alldata['totalnbrenames'].append(
2065 alldata['totalnbrenames'].append(
2050 (
2066 (
2051 data['nbrenamedfiles'],
2067 data['nbrenamedfiles'],
2052 b.hex(),
2068 b.hex(),
2053 p1.hex(),
2069 p1.hex(),
2054 p2.hex(),
2070 p2.hex(),
2055 )
2071 )
2056 )
2072 )
2057 alldata['totaltime'].append(
2073 alldata['totaltime'].append(
2058 (data['time'], b.hex(), p1.hex(), p2.hex())
2074 (data['time'], b.hex(), p1.hex(), p2.hex())
2059 )
2075 )
2060 fm.startitem()
2076 fm.startitem()
2061 fm.data(**data)
2077 fm.data(**data)
2062 # make node pretty for the human output
2078 # make node pretty for the human output
2063 out = data.copy()
2079 out = data.copy()
2064 out['base'] = fm.hexfunc(b.node())
2080 out['base'] = fm.hexfunc(b.node())
2065 out['p1.node'] = fm.hexfunc(p1.node())
2081 out['p1.node'] = fm.hexfunc(p1.node())
2066 out['p2.node'] = fm.hexfunc(p2.node())
2082 out['p2.node'] = fm.hexfunc(p2.node())
2067 fm.plain(output % out)
2083 fm.plain(output % out)
2068
2084
2069 fm.end()
2085 fm.end()
2070 if dostats:
2086 if dostats:
2071 # use a second formatter because the data are quite different, not sure
2087 # use a second formatter because the data are quite different, not sure
2072 # how it flies with the templater.
2088 # how it flies with the templater.
2073 entries = [
2089 entries = [
2074 ('nbrevs', 'number of revisions covered'),
2090 ('nbrevs', 'number of revisions covered'),
2075 ('nbmissingfiles', 'number of missing files at head'),
2091 ('nbmissingfiles', 'number of missing files at head'),
2076 ]
2092 ]
2077 if dotiming:
2093 if dotiming:
2078 entries.append(
2094 entries.append(
2079 ('parentnbrenames', 'rename from one parent to base')
2095 ('parentnbrenames', 'rename from one parent to base')
2080 )
2096 )
2081 entries.append(('totalnbrenames', 'total number of renames'))
2097 entries.append(('totalnbrenames', 'total number of renames'))
2082 entries.append(('parenttime', 'time for one parent'))
2098 entries.append(('parenttime', 'time for one parent'))
2083 entries.append(('totaltime', 'time for both parents'))
2099 entries.append(('totaltime', 'time for both parents'))
2084 _displaystats(ui, opts, entries, alldata)
2100 _displaystats(ui, opts, entries, alldata)
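# A minimal sketch of the wall-clock idiom used by the measurements above;
# `expensive_call` is a placeholder name, not a real helper:
#
#   begin = util.timer()
#   result = expensive_call()
#   end = util.timer()
#   elapsed = end - begin   # what ends up in data['p1.time'], data['p2.time'], ...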
2085
2101
2086
2102
2087 @command(
2103 @command(
2088 b'perfhelper-pathcopies',
2104 b'perfhelper-pathcopies',
2089 formatteropts
2105 formatteropts
2090 + [
2106 + [
2091 (b'r', b'revs', [], b'restrict search to these revisions'),
2107 (b'r', b'revs', [], b'restrict search to these revisions'),
2092 (b'', b'timing', False, b'provides extra data (costly)'),
2108 (b'', b'timing', False, b'provides extra data (costly)'),
2093 (b'', b'stats', False, b'provides statistic about the measured data'),
2109 (b'', b'stats', False, b'provides statistic about the measured data'),
2094 ],
2110 ],
2095 )
2111 )
2096 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2112 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2097 """find statistic about potential parameters for the `perftracecopies`
2113 """find statistic about potential parameters for the `perftracecopies`
2098
2114
2099 This command find source-destination pair relevant for copytracing testing.
2115 This command find source-destination pair relevant for copytracing testing.
2100 It report value for some of the parameters that impact copy tracing time.
2116 It report value for some of the parameters that impact copy tracing time.
2101
2117
2102 If `--timing` is set, rename detection is run and the associated timing
2118 If `--timing` is set, rename detection is run and the associated timing
2103 will be reported. The extra details come at the cost of slower command
2119 will be reported. The extra details come at the cost of slower command
2104 execution.
2120 execution.
2105
2121
2106 Since the rename detection is only run once, other factors might easily
2122 Since the rename detection is only run once, other factors might easily
2107 affect the precision of the timing. However, it should give a good
2123 affect the precision of the timing. However, it should give a good
2108 approximation of which revision pairs are very costly.
2124 approximation of which revision pairs are very costly.
2109 """
2125 """
2110 opts = _byteskwargs(opts)
2126 opts = _byteskwargs(opts)
2111 fm = ui.formatter(b'perf', opts)
2127 fm = ui.formatter(b'perf', opts)
2112 dotiming = opts[b'timing']
2128 dotiming = opts[b'timing']
2113 dostats = opts[b'stats']
2129 dostats = opts[b'stats']
2114
2130
2115 if dotiming:
2131 if dotiming:
2116 header = '%12s %12s %12s %12s %12s %12s\n'
2132 header = '%12s %12s %12s %12s %12s %12s\n'
2117 output = (
2133 output = (
2118 "%(source)12s %(destination)12s "
2134 "%(source)12s %(destination)12s "
2119 "%(nbrevs)12d %(nbmissingfiles)12d "
2135 "%(nbrevs)12d %(nbmissingfiles)12d "
2120 "%(nbrenamedfiles)12d %(time)18.5f\n"
2136 "%(nbrenamedfiles)12d %(time)18.5f\n"
2121 )
2137 )
2122 header_names = (
2138 header_names = (
2123 "source",
2139 "source",
2124 "destination",
2140 "destination",
2125 "nb-revs",
2141 "nb-revs",
2126 "nb-files",
2142 "nb-files",
2127 "nb-renames",
2143 "nb-renames",
2128 "time",
2144 "time",
2129 )
2145 )
2130 fm.plain(header % header_names)
2146 fm.plain(header % header_names)
2131 else:
2147 else:
2132 header = '%12s %12s %12s %12s\n'
2148 header = '%12s %12s %12s %12s\n'
2133 output = (
2149 output = (
2134 "%(source)12s %(destination)12s "
2150 "%(source)12s %(destination)12s "
2135 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2151 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2136 )
2152 )
2137 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2153 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2138
2154
2139 if not revs:
2155 if not revs:
2140 revs = ['all()']
2156 revs = ['all()']
2141 revs = scmutil.revrange(repo, revs)
2157 revs = scmutil.revrange(repo, revs)
2142
2158
2143 if dostats:
2159 if dostats:
2144 alldata = {
2160 alldata = {
2145 'nbrevs': [],
2161 'nbrevs': [],
2146 'nbmissingfiles': [],
2162 'nbmissingfiles': [],
2147 }
2163 }
2148 if dotiming:
2164 if dotiming:
2149 alldata['nbrenames'] = []
2165 alldata['nbrenames'] = []
2150 alldata['time'] = []
2166 alldata['time'] = []
2151
2167
2152 roi = repo.revs('merge() and %ld', revs)
2168 roi = repo.revs('merge() and %ld', revs)
2153 for r in roi:
2169 for r in roi:
2154 ctx = repo[r]
2170 ctx = repo[r]
2155 p1 = ctx.p1().rev()
2171 p1 = ctx.p1().rev()
2156 p2 = ctx.p2().rev()
2172 p2 = ctx.p2().rev()
2157 bases = repo.changelog._commonancestorsheads(p1, p2)
2173 bases = repo.changelog._commonancestorsheads(p1, p2)
2158 for p in (p1, p2):
2174 for p in (p1, p2):
2159 for b in bases:
2175 for b in bases:
2160 base = repo[b]
2176 base = repo[b]
2161 parent = repo[p]
2177 parent = repo[p]
2162 missing = copies._computeforwardmissing(base, parent)
2178 missing = copies._computeforwardmissing(base, parent)
2163 if not missing:
2179 if not missing:
2164 continue
2180 continue
2165 data = {
2181 data = {
2166 b'source': base.hex(),
2182 b'source': base.hex(),
2167 b'destination': parent.hex(),
2183 b'destination': parent.hex(),
2168 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2184 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2169 b'nbmissingfiles': len(missing),
2185 b'nbmissingfiles': len(missing),
2170 }
2186 }
2171 if dostats:
2187 if dostats:
2172 alldata['nbrevs'].append(
2188 alldata['nbrevs'].append(
2173 (data['nbrevs'], base.hex(), parent.hex(),)
2189 (data['nbrevs'], base.hex(), parent.hex(),)
2174 )
2190 )
2175 alldata['nbmissingfiles'].append(
2191 alldata['nbmissingfiles'].append(
2176 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2192 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2177 )
2193 )
2178 if dotiming:
2194 if dotiming:
2179 begin = util.timer()
2195 begin = util.timer()
2180 renames = copies.pathcopies(base, parent)
2196 renames = copies.pathcopies(base, parent)
2181 end = util.timer()
2197 end = util.timer()
2182 # not very stable timing since we did only one run
2198 # not very stable timing since we did only one run
2183 data['time'] = end - begin
2199 data['time'] = end - begin
2184 data['nbrenamedfiles'] = len(renames)
2200 data['nbrenamedfiles'] = len(renames)
2185 if dostats:
2201 if dostats:
2186 alldata['time'].append(
2202 alldata['time'].append(
2187 (data['time'], base.hex(), parent.hex(),)
2203 (data['time'], base.hex(), parent.hex(),)
2188 )
2204 )
2189 alldata['nbrenames'].append(
2205 alldata['nbrenames'].append(
2190 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2206 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2191 )
2207 )
2192 fm.startitem()
2208 fm.startitem()
2193 fm.data(**data)
2209 fm.data(**data)
2194 out = data.copy()
2210 out = data.copy()
2195 out['source'] = fm.hexfunc(base.node())
2211 out['source'] = fm.hexfunc(base.node())
2196 out['destination'] = fm.hexfunc(parent.node())
2212 out['destination'] = fm.hexfunc(parent.node())
2197 fm.plain(output % out)
2213 fm.plain(output % out)
2198
2214
2199 fm.end()
2215 fm.end()
2200 if dostats:
2216 if dostats:
2201 # use a second formatter because the data are quite different, not sure
2217 # use a second formatter because the data are quite different, not sure
2202 # how it flies with the templater.
2218 # how it flies with the templater.
2203 fm = ui.formatter(b'perf', opts)
2219 fm = ui.formatter(b'perf', opts)
2204 entries = [
2220 entries = [
2205 ('nbrevs', 'number of revisions covered'),
2221 ('nbrevs', 'number of revisions covered'),
2206 ('nbmissingfiles', 'number of missing files at head'),
2222 ('nbmissingfiles', 'number of missing files at head'),
2207 ]
2223 ]
2208 if dotiming:
2224 if dotiming:
2209 entries.append(('nbrenames', 'renamed files'))
2225 entries.append(('nbrenames', 'renamed files'))
2210 entries.append(('time', 'time'))
2226 entries.append(('time', 'time'))
2211 _displaystats(ui, opts, entries, alldata)
2227 _displaystats(ui, opts, entries, alldata)
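# Hedged usage sketch for the command above; the revset is purely
# illustrative, and --timing makes the run noticeably slower as noted in the
# docstring:
#
#   $ hg perfhelper-pathcopies --stats --timing -r 'last(all(), 1000)'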
2212
2228
2213
2229
2214 @command(b'perfcca', formatteropts)
2230 @command(b'perfcca', formatteropts)
2215 def perfcca(ui, repo, **opts):
2231 def perfcca(ui, repo, **opts):
2216 opts = _byteskwargs(opts)
2232 opts = _byteskwargs(opts)
2217 timer, fm = gettimer(ui, opts)
2233 timer, fm = gettimer(ui, opts)
2218 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2234 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2219 fm.end()
2235 fm.end()
2220
2236
2221
2237
2222 @command(b'perffncacheload', formatteropts)
2238 @command(b'perffncacheload', formatteropts)
2223 def perffncacheload(ui, repo, **opts):
2239 def perffncacheload(ui, repo, **opts):
2224 opts = _byteskwargs(opts)
2240 opts = _byteskwargs(opts)
2225 timer, fm = gettimer(ui, opts)
2241 timer, fm = gettimer(ui, opts)
2226 s = repo.store
2242 s = repo.store
2227
2243
2228 def d():
2244 def d():
2229 s.fncache._load()
2245 s.fncache._load()
2230
2246
2231 timer(d)
2247 timer(d)
2232 fm.end()
2248 fm.end()
2233
2249
2234
2250
2235 @command(b'perffncachewrite', formatteropts)
2251 @command(b'perffncachewrite', formatteropts)
2236 def perffncachewrite(ui, repo, **opts):
2252 def perffncachewrite(ui, repo, **opts):
2237 opts = _byteskwargs(opts)
2253 opts = _byteskwargs(opts)
2238 timer, fm = gettimer(ui, opts)
2254 timer, fm = gettimer(ui, opts)
2239 s = repo.store
2255 s = repo.store
2240 lock = repo.lock()
2256 lock = repo.lock()
2241 s.fncache._load()
2257 s.fncache._load()
2242 tr = repo.transaction(b'perffncachewrite')
2258 tr = repo.transaction(b'perffncachewrite')
2243 tr.addbackup(b'fncache')
2259 tr.addbackup(b'fncache')
2244
2260
2245 def d():
2261 def d():
2246 s.fncache._dirty = True
2262 s.fncache._dirty = True
2247 s.fncache.write(tr)
2263 s.fncache.write(tr)
2248
2264
2249 timer(d)
2265 timer(d)
2250 tr.close()
2266 tr.close()
2251 lock.release()
2267 lock.release()
2252 fm.end()
2268 fm.end()
2253
2269
2254
2270
2255 @command(b'perffncacheencode', formatteropts)
2271 @command(b'perffncacheencode', formatteropts)
2256 def perffncacheencode(ui, repo, **opts):
2272 def perffncacheencode(ui, repo, **opts):
2257 opts = _byteskwargs(opts)
2273 opts = _byteskwargs(opts)
2258 timer, fm = gettimer(ui, opts)
2274 timer, fm = gettimer(ui, opts)
2259 s = repo.store
2275 s = repo.store
2260 s.fncache._load()
2276 s.fncache._load()
2261
2277
2262 def d():
2278 def d():
2263 for p in s.fncache.entries:
2279 for p in s.fncache.entries:
2264 s.encode(p)
2280 s.encode(p)
2265
2281
2266 timer(d)
2282 timer(d)
2267 fm.end()
2283 fm.end()
2268
2284
2269
2285
2270 def _bdiffworker(q, blocks, xdiff, ready, done):
2286 def _bdiffworker(q, blocks, xdiff, ready, done):
2271 while not done.is_set():
2287 while not done.is_set():
2272 pair = q.get()
2288 pair = q.get()
2273 while pair is not None:
2289 while pair is not None:
2274 if xdiff:
2290 if xdiff:
2275 mdiff.bdiff.xdiffblocks(*pair)
2291 mdiff.bdiff.xdiffblocks(*pair)
2276 elif blocks:
2292 elif blocks:
2277 mdiff.bdiff.blocks(*pair)
2293 mdiff.bdiff.blocks(*pair)
2278 else:
2294 else:
2279 mdiff.textdiff(*pair)
2295 mdiff.textdiff(*pair)
2280 q.task_done()
2296 q.task_done()
2281 pair = q.get()
2297 pair = q.get()
2282 q.task_done() # for the None one
2298 q.task_done() # for the None one
2283 with ready:
2299 with ready:
2284 ready.wait()
2300 ready.wait()
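# Notes on the worker protocol above (descriptive only):
# - each worker drains pairs from `q` until it sees a None sentinel;
# - q.task_done() is called once per pair and once for the sentinel, so the
#   caller's q.join() returns exactly when a batch has been fully processed;
# - after a batch the worker blocks on the `ready` condition until the
#   benchmark loop calls notify_all() for the next round or sets `done`.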
2285
2301
2286
2302
2287 def _manifestrevision(repo, mnode):
2303 def _manifestrevision(repo, mnode):
2288 ml = repo.manifestlog
2304 ml = repo.manifestlog
2289
2305
2290 if util.safehasattr(ml, b'getstorage'):
2306 if util.safehasattr(ml, b'getstorage'):
2291 store = ml.getstorage(b'')
2307 store = ml.getstorage(b'')
2292 else:
2308 else:
2293 store = ml._revlog
2309 store = ml._revlog
2294
2310
2295 return store.revision(mnode)
2311 return store.revision(mnode)
2296
2312
2297
2313
2298 @command(
2314 @command(
2299 b'perfbdiff',
2315 b'perfbdiff',
2300 revlogopts
2316 revlogopts
2301 + formatteropts
2317 + formatteropts
2302 + [
2318 + [
2303 (
2319 (
2304 b'',
2320 b'',
2305 b'count',
2321 b'count',
2306 1,
2322 1,
2307 b'number of revisions to test (when using --startrev)',
2323 b'number of revisions to test (when using --startrev)',
2308 ),
2324 ),
2309 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2325 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2310 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2326 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2311 (b'', b'blocks', False, b'test computing diffs into blocks'),
2327 (b'', b'blocks', False, b'test computing diffs into blocks'),
2312 (b'', b'xdiff', False, b'use xdiff algorithm'),
2328 (b'', b'xdiff', False, b'use xdiff algorithm'),
2313 ],
2329 ],
2314 b'-c|-m|FILE REV',
2330 b'-c|-m|FILE REV',
2315 )
2331 )
2316 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2332 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2317 """benchmark a bdiff between revisions
2333 """benchmark a bdiff between revisions
2318
2334
2319 By default, benchmark a bdiff between the given revision and its delta parent.
2335 By default, benchmark a bdiff between the given revision and its delta parent.
2320
2336
2321 With ``--count``, benchmark bdiffs between delta parents and self for N
2337 With ``--count``, benchmark bdiffs between delta parents and self for N
2322 revisions starting at the specified revision.
2338 revisions starting at the specified revision.
2323
2339
2324 With ``--alldata``, assume the requested revision is a changeset and
2340 With ``--alldata``, assume the requested revision is a changeset and
2325 measure bdiffs for all changes related to that changeset (manifest
2341 measure bdiffs for all changes related to that changeset (manifest
2326 and filelogs).
2342 and filelogs).
2327 """
2343 """
2328 opts = _byteskwargs(opts)
2344 opts = _byteskwargs(opts)
2329
2345
2330 if opts[b'xdiff'] and not opts[b'blocks']:
2346 if opts[b'xdiff'] and not opts[b'blocks']:
2331 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2347 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2332
2348
2333 if opts[b'alldata']:
2349 if opts[b'alldata']:
2334 opts[b'changelog'] = True
2350 opts[b'changelog'] = True
2335
2351
2336 if opts.get(b'changelog') or opts.get(b'manifest'):
2352 if opts.get(b'changelog') or opts.get(b'manifest'):
2337 file_, rev = None, file_
2353 file_, rev = None, file_
2338 elif rev is None:
2354 elif rev is None:
2339 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2355 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2340
2356
2341 blocks = opts[b'blocks']
2357 blocks = opts[b'blocks']
2342 xdiff = opts[b'xdiff']
2358 xdiff = opts[b'xdiff']
2343 textpairs = []
2359 textpairs = []
2344
2360
2345 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2361 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2346
2362
2347 startrev = r.rev(r.lookup(rev))
2363 startrev = r.rev(r.lookup(rev))
2348 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2364 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2349 if opts[b'alldata']:
2365 if opts[b'alldata']:
2350 # Load revisions associated with changeset.
2366 # Load revisions associated with changeset.
2351 ctx = repo[rev]
2367 ctx = repo[rev]
2352 mtext = _manifestrevision(repo, ctx.manifestnode())
2368 mtext = _manifestrevision(repo, ctx.manifestnode())
2353 for pctx in ctx.parents():
2369 for pctx in ctx.parents():
2354 pman = _manifestrevision(repo, pctx.manifestnode())
2370 pman = _manifestrevision(repo, pctx.manifestnode())
2355 textpairs.append((pman, mtext))
2371 textpairs.append((pman, mtext))
2356
2372
2357 # Load filelog revisions by iterating manifest delta.
2373 # Load filelog revisions by iterating manifest delta.
2358 man = ctx.manifest()
2374 man = ctx.manifest()
2359 pman = ctx.p1().manifest()
2375 pman = ctx.p1().manifest()
2360 for filename, change in pman.diff(man).items():
2376 for filename, change in pman.diff(man).items():
2361 fctx = repo.file(filename)
2377 fctx = repo.file(filename)
2362 f1 = fctx.revision(change[0][0] or -1)
2378 f1 = fctx.revision(change[0][0] or -1)
2363 f2 = fctx.revision(change[1][0] or -1)
2379 f2 = fctx.revision(change[1][0] or -1)
2364 textpairs.append((f1, f2))
2380 textpairs.append((f1, f2))
2365 else:
2381 else:
2366 dp = r.deltaparent(rev)
2382 dp = r.deltaparent(rev)
2367 textpairs.append((r.revision(dp), r.revision(rev)))
2383 textpairs.append((r.revision(dp), r.revision(rev)))
2368
2384
2369 withthreads = threads > 0
2385 withthreads = threads > 0
2370 if not withthreads:
2386 if not withthreads:
2371
2387
2372 def d():
2388 def d():
2373 for pair in textpairs:
2389 for pair in textpairs:
2374 if xdiff:
2390 if xdiff:
2375 mdiff.bdiff.xdiffblocks(*pair)
2391 mdiff.bdiff.xdiffblocks(*pair)
2376 elif blocks:
2392 elif blocks:
2377 mdiff.bdiff.blocks(*pair)
2393 mdiff.bdiff.blocks(*pair)
2378 else:
2394 else:
2379 mdiff.textdiff(*pair)
2395 mdiff.textdiff(*pair)
2380
2396
2381 else:
2397 else:
2382 q = queue()
2398 q = queue()
2383 for i in _xrange(threads):
2399 for i in _xrange(threads):
2384 q.put(None)
2400 q.put(None)
2385 ready = threading.Condition()
2401 ready = threading.Condition()
2386 done = threading.Event()
2402 done = threading.Event()
2387 for i in _xrange(threads):
2403 for i in _xrange(threads):
2388 threading.Thread(
2404 threading.Thread(
2389 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2405 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2390 ).start()
2406 ).start()
2391 q.join()
2407 q.join()
2392
2408
2393 def d():
2409 def d():
2394 for pair in textpairs:
2410 for pair in textpairs:
2395 q.put(pair)
2411 q.put(pair)
2396 for i in _xrange(threads):
2412 for i in _xrange(threads):
2397 q.put(None)
2413 q.put(None)
2398 with ready:
2414 with ready:
2399 ready.notify_all()
2415 ready.notify_all()
2400 q.join()
2416 q.join()
2401
2417
2402 timer, fm = gettimer(ui, opts)
2418 timer, fm = gettimer(ui, opts)
2403 timer(d)
2419 timer(d)
2404 fm.end()
2420 fm.end()
2405
2421
2406 if withthreads:
2422 if withthreads:
2407 done.set()
2423 done.set()
2408 for i in _xrange(threads):
2424 for i in _xrange(threads):
2409 q.put(None)
2425 q.put(None)
2410 with ready:
2426 with ready:
2411 ready.notify_all()
2427 ready.notify_all()
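# Hedged invocation examples for the command above; the revision numbers are
# placeholders, and --xdiff must be combined with --blocks as enforced at the
# top of the function:
#
#   $ hg perfbdiff -m 10000 --count 100
#   $ hg perfbdiff --alldata 10000 --blocks --xdiff --threads 4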
2412
2428
2413
2429
2414 @command(
2430 @command(
2415 b'perfunidiff',
2431 b'perfunidiff',
2416 revlogopts
2432 revlogopts
2417 + formatteropts
2433 + formatteropts
2418 + [
2434 + [
2419 (
2435 (
2420 b'',
2436 b'',
2421 b'count',
2437 b'count',
2422 1,
2438 1,
2423 b'number of revisions to test (when using --startrev)',
2439 b'number of revisions to test (when using --startrev)',
2424 ),
2440 ),
2425 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2441 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2426 ],
2442 ],
2427 b'-c|-m|FILE REV',
2443 b'-c|-m|FILE REV',
2428 )
2444 )
2429 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2445 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2430 """benchmark a unified diff between revisions
2446 """benchmark a unified diff between revisions
2431
2447
2432 This doesn't include any copy tracing - it's just a unified diff
2448 This doesn't include any copy tracing - it's just a unified diff
2433 of the texts.
2449 of the texts.
2434
2450
2435 By default, benchmark a diff between the given revision and its delta parent.
2451 By default, benchmark a diff between the given revision and its delta parent.
2436
2452
2437 With ``--count``, benchmark diffs between delta parents and self for N
2453 With ``--count``, benchmark diffs between delta parents and self for N
2438 revisions starting at the specified revision.
2454 revisions starting at the specified revision.
2439
2455
2440 With ``--alldata``, assume the requested revision is a changeset and
2456 With ``--alldata``, assume the requested revision is a changeset and
2441 measure diffs for all changes related to that changeset (manifest
2457 measure diffs for all changes related to that changeset (manifest
2442 and filelogs).
2458 and filelogs).
2443 """
2459 """
2444 opts = _byteskwargs(opts)
2460 opts = _byteskwargs(opts)
2445 if opts[b'alldata']:
2461 if opts[b'alldata']:
2446 opts[b'changelog'] = True
2462 opts[b'changelog'] = True
2447
2463
2448 if opts.get(b'changelog') or opts.get(b'manifest'):
2464 if opts.get(b'changelog') or opts.get(b'manifest'):
2449 file_, rev = None, file_
2465 file_, rev = None, file_
2450 elif rev is None:
2466 elif rev is None:
2451 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2467 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2452
2468
2453 textpairs = []
2469 textpairs = []
2454
2470
2455 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2471 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2456
2472
2457 startrev = r.rev(r.lookup(rev))
2473 startrev = r.rev(r.lookup(rev))
2458 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2474 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2459 if opts[b'alldata']:
2475 if opts[b'alldata']:
2460 # Load revisions associated with changeset.
2476 # Load revisions associated with changeset.
2461 ctx = repo[rev]
2477 ctx = repo[rev]
2462 mtext = _manifestrevision(repo, ctx.manifestnode())
2478 mtext = _manifestrevision(repo, ctx.manifestnode())
2463 for pctx in ctx.parents():
2479 for pctx in ctx.parents():
2464 pman = _manifestrevision(repo, pctx.manifestnode())
2480 pman = _manifestrevision(repo, pctx.manifestnode())
2465 textpairs.append((pman, mtext))
2481 textpairs.append((pman, mtext))
2466
2482
2467 # Load filelog revisions by iterating manifest delta.
2483 # Load filelog revisions by iterating manifest delta.
2468 man = ctx.manifest()
2484 man = ctx.manifest()
2469 pman = ctx.p1().manifest()
2485 pman = ctx.p1().manifest()
2470 for filename, change in pman.diff(man).items():
2486 for filename, change in pman.diff(man).items():
2471 fctx = repo.file(filename)
2487 fctx = repo.file(filename)
2472 f1 = fctx.revision(change[0][0] or -1)
2488 f1 = fctx.revision(change[0][0] or -1)
2473 f2 = fctx.revision(change[1][0] or -1)
2489 f2 = fctx.revision(change[1][0] or -1)
2474 textpairs.append((f1, f2))
2490 textpairs.append((f1, f2))
2475 else:
2491 else:
2476 dp = r.deltaparent(rev)
2492 dp = r.deltaparent(rev)
2477 textpairs.append((r.revision(dp), r.revision(rev)))
2493 textpairs.append((r.revision(dp), r.revision(rev)))
2478
2494
2479 def d():
2495 def d():
2480 for left, right in textpairs:
2496 for left, right in textpairs:
2481 # The date strings don't matter, so we pass empty strings.
2497 # The date strings don't matter, so we pass empty strings.
2482 headerlines, hunks = mdiff.unidiff(
2498 headerlines, hunks = mdiff.unidiff(
2483 left, b'', right, b'', b'left', b'right', binary=False
2499 left, b'', right, b'', b'left', b'right', binary=False
2484 )
2500 )
2485 # consume iterators in roughly the way patch.py does
2501 # consume iterators in roughly the way patch.py does
2486 b'\n'.join(headerlines)
2502 b'\n'.join(headerlines)
2487 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2503 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2488
2504
2489 timer, fm = gettimer(ui, opts)
2505 timer, fm = gettimer(ui, opts)
2490 timer(d)
2506 timer(d)
2491 fm.end()
2507 fm.end()
2492
2508
2493
2509
2494 @command(b'perfdiffwd', formatteropts)
2510 @command(b'perfdiffwd', formatteropts)
2495 def perfdiffwd(ui, repo, **opts):
2511 def perfdiffwd(ui, repo, **opts):
2496 """Profile diff of working directory changes"""
2512 """Profile diff of working directory changes"""
2497 opts = _byteskwargs(opts)
2513 opts = _byteskwargs(opts)
2498 timer, fm = gettimer(ui, opts)
2514 timer, fm = gettimer(ui, opts)
2499 options = {
2515 options = {
2500 'w': 'ignore_all_space',
2516 'w': 'ignore_all_space',
2501 'b': 'ignore_space_change',
2517 'b': 'ignore_space_change',
2502 'B': 'ignore_blank_lines',
2518 'B': 'ignore_blank_lines',
2503 }
2519 }
2504
2520
2505 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2521 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2506 opts = dict((options[c], b'1') for c in diffopt)
2522 opts = dict((options[c], b'1') for c in diffopt)
2507
2523
2508 def d():
2524 def d():
2509 ui.pushbuffer()
2525 ui.pushbuffer()
2510 commands.diff(ui, repo, **opts)
2526 commands.diff(ui, repo, **opts)
2511 ui.popbuffer()
2527 ui.popbuffer()
2512
2528
2513 diffopt = diffopt.encode('ascii')
2529 diffopt = diffopt.encode('ascii')
2514 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2530 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2515 timer(d, title=title)
2531 timer(d, title=title)
2516 fm.end()
2532 fm.end()
2517
2533
2518
2534
2519 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2535 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2520 def perfrevlogindex(ui, repo, file_=None, **opts):
2536 def perfrevlogindex(ui, repo, file_=None, **opts):
2521 """Benchmark operations against a revlog index.
2537 """Benchmark operations against a revlog index.
2522
2538
2523 This tests constructing a revlog instance, reading index data,
2539 This tests constructing a revlog instance, reading index data,
2524 parsing index data, and performing various operations related to
2540 parsing index data, and performing various operations related to
2525 index data.
2541 index data.
2526 """
2542 """
2527
2543
2528 opts = _byteskwargs(opts)
2544 opts = _byteskwargs(opts)
2529
2545
2530 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2546 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2531
2547
2532 opener = getattr(rl, 'opener') # trick linter
2548 opener = getattr(rl, 'opener') # trick linter
2533 indexfile = rl.indexfile
2549 indexfile = rl.indexfile
2534 data = opener.read(indexfile)
2550 data = opener.read(indexfile)
2535
2551
2536 header = struct.unpack(b'>I', data[0:4])[0]
2552 header = struct.unpack(b'>I', data[0:4])[0]
2537 version = header & 0xFFFF
2553 version = header & 0xFFFF
2538 if version == 1:
2554 if version == 1:
2539 revlogio = revlog.revlogio()
2555 revlogio = revlog.revlogio()
2540 inline = header & (1 << 16)
2556 inline = header & (1 << 16)
2541 else:
2557 else:
2542 raise error.Abort(b'unsupported revlog version: %d' % version)
2558 raise error.Abort(b'unsupported revlog version: %d' % version)
2543
2559
2544 rllen = len(rl)
2560 rllen = len(rl)
2545
2561
2546 node0 = rl.node(0)
2562 node0 = rl.node(0)
2547 node25 = rl.node(rllen // 4)
2563 node25 = rl.node(rllen // 4)
2548 node50 = rl.node(rllen // 2)
2564 node50 = rl.node(rllen // 2)
2549 node75 = rl.node(rllen // 4 * 3)
2565 node75 = rl.node(rllen // 4 * 3)
2550 node100 = rl.node(rllen - 1)
2566 node100 = rl.node(rllen - 1)
2551
2567
2552 allrevs = range(rllen)
2568 allrevs = range(rllen)
2553 allrevsrev = list(reversed(allrevs))
2569 allrevsrev = list(reversed(allrevs))
2554 allnodes = [rl.node(rev) for rev in range(rllen)]
2570 allnodes = [rl.node(rev) for rev in range(rllen)]
2555 allnodesrev = list(reversed(allnodes))
2571 allnodesrev = list(reversed(allnodes))
2556
2572
2557 def constructor():
2573 def constructor():
2558 revlog.revlog(opener, indexfile)
2574 revlog.revlog(opener, indexfile)
2559
2575
2560 def read():
2576 def read():
2561 with opener(indexfile) as fh:
2577 with opener(indexfile) as fh:
2562 fh.read()
2578 fh.read()
2563
2579
2564 def parseindex():
2580 def parseindex():
2565 revlogio.parseindex(data, inline)
2581 revlogio.parseindex(data, inline)
2566
2582
2567 def getentry(revornode):
2583 def getentry(revornode):
2568 index = revlogio.parseindex(data, inline)[0]
2584 index = revlogio.parseindex(data, inline)[0]
2569 index[revornode]
2585 index[revornode]
2570
2586
2571 def getentries(revs, count=1):
2587 def getentries(revs, count=1):
2572 index = revlogio.parseindex(data, inline)[0]
2588 index = revlogio.parseindex(data, inline)[0]
2573
2589
2574 for i in range(count):
2590 for i in range(count):
2575 for rev in revs:
2591 for rev in revs:
2576 index[rev]
2592 index[rev]
2577
2593
2578 def resolvenode(node):
2594 def resolvenode(node):
2579 nodemap = revlogio.parseindex(data, inline)[1]
2595 nodemap = revlogio.parseindex(data, inline)[1]
2580 # This only works for the C code.
2596 # This only works for the C code.
2581 if nodemap is None:
2597 if nodemap is None:
2582 return
2598 return
2583
2599
2584 try:
2600 try:
2585 nodemap[node]
2601 nodemap[node]
2586 except error.RevlogError:
2602 except error.RevlogError:
2587 pass
2603 pass
2588
2604
2589 def resolvenodes(nodes, count=1):
2605 def resolvenodes(nodes, count=1):
2590 nodemap = revlogio.parseindex(data, inline)[1]
2606 nodemap = revlogio.parseindex(data, inline)[1]
2591 if nodemap is None:
2607 if nodemap is None:
2592 return
2608 return
2593
2609
2594 for i in range(count):
2610 for i in range(count):
2595 for node in nodes:
2611 for node in nodes:
2596 try:
2612 try:
2597 nodemap[node]
2613 nodemap[node]
2598 except error.RevlogError:
2614 except error.RevlogError:
2599 pass
2615 pass
2600
2616
2601 benches = [
2617 benches = [
2602 (constructor, b'revlog constructor'),
2618 (constructor, b'revlog constructor'),
2603 (read, b'read'),
2619 (read, b'read'),
2604 (parseindex, b'create index object'),
2620 (parseindex, b'create index object'),
2605 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2621 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2606 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2622 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2607 (lambda: resolvenode(node0), b'look up node at rev 0'),
2623 (lambda: resolvenode(node0), b'look up node at rev 0'),
2608 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2624 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2609 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2625 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2610 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2626 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2611 (lambda: resolvenode(node100), b'look up node at tip'),
2627 (lambda: resolvenode(node100), b'look up node at tip'),
2612 # 2x variation is to measure caching impact.
2628 # 2x variation is to measure caching impact.
2613 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2629 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2614 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2630 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2615 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2631 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2616 (
2632 (
2617 lambda: resolvenodes(allnodesrev, 2),
2633 lambda: resolvenodes(allnodesrev, 2),
2618 b'look up all nodes 2x (reverse)',
2634 b'look up all nodes 2x (reverse)',
2619 ),
2635 ),
2620 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2636 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2621 (
2637 (
2622 lambda: getentries(allrevs, 2),
2638 lambda: getentries(allrevs, 2),
2623 b'retrieve all index entries 2x (forward)',
2639 b'retrieve all index entries 2x (forward)',
2624 ),
2640 ),
2625 (
2641 (
2626 lambda: getentries(allrevsrev),
2642 lambda: getentries(allrevsrev),
2627 b'retrieve all index entries (reverse)',
2643 b'retrieve all index entries (reverse)',
2628 ),
2644 ),
2629 (
2645 (
2630 lambda: getentries(allrevsrev, 2),
2646 lambda: getentries(allrevsrev, 2),
2631 b'retrieve all index entries 2x (reverse)',
2647 b'retrieve all index entries 2x (reverse)',
2632 ),
2648 ),
2633 ]
2649 ]
2634
2650
2635 for fn, title in benches:
2651 for fn, title in benches:
2636 timer, fm = gettimer(ui, opts)
2652 timer, fm = gettimer(ui, opts)
2637 timer(fn, title=title)
2653 timer(fn, title=title)
2638 fm.end()
2654 fm.end()
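# Hedged invocation examples for the command above; -c and -m select the
# changelog or manifest revlog, while a filename argument selects a filelog:
#
#   $ hg perfrevlogindex -c
#   $ hg perfrevlogindex -m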
2639
2655
2640
2656
2641 @command(
2657 @command(
2642 b'perfrevlogrevisions',
2658 b'perfrevlogrevisions',
2643 revlogopts
2659 revlogopts
2644 + formatteropts
2660 + formatteropts
2645 + [
2661 + [
2646 (b'd', b'dist', 100, b'distance between the revisions'),
2662 (b'd', b'dist', 100, b'distance between the revisions'),
2647 (b's', b'startrev', 0, b'revision to start reading at'),
2663 (b's', b'startrev', 0, b'revision to start reading at'),
2648 (b'', b'reverse', False, b'read in reverse'),
2664 (b'', b'reverse', False, b'read in reverse'),
2649 ],
2665 ],
2650 b'-c|-m|FILE',
2666 b'-c|-m|FILE',
2651 )
2667 )
2652 def perfrevlogrevisions(
2668 def perfrevlogrevisions(
2653 ui, repo, file_=None, startrev=0, reverse=False, **opts
2669 ui, repo, file_=None, startrev=0, reverse=False, **opts
2654 ):
2670 ):
2655 """Benchmark reading a series of revisions from a revlog.
2671 """Benchmark reading a series of revisions from a revlog.
2656
2672
2657 By default, we read every ``-d/--dist`` revision from 0 to tip of
2673 By default, we read every ``-d/--dist`` revision from 0 to tip of
2658 the specified revlog.
2674 the specified revlog.
2659
2675
2660 The start revision can be defined via ``-s/--startrev``.
2676 The start revision can be defined via ``-s/--startrev``.
2661 """
2677 """
2662 opts = _byteskwargs(opts)
2678 opts = _byteskwargs(opts)
2663
2679
2664 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2680 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2665 rllen = getlen(ui)(rl)
2681 rllen = getlen(ui)(rl)
2666
2682
2667 if startrev < 0:
2683 if startrev < 0:
2668 startrev = rllen + startrev
2684 startrev = rllen + startrev
2669
2685
2670 def d():
2686 def d():
2671 rl.clearcaches()
2687 rl.clearcaches()
2672
2688
2673 beginrev = startrev
2689 beginrev = startrev
2674 endrev = rllen
2690 endrev = rllen
2675 dist = opts[b'dist']
2691 dist = opts[b'dist']
2676
2692
2677 if reverse:
2693 if reverse:
2678 beginrev, endrev = endrev - 1, beginrev - 1
2694 beginrev, endrev = endrev - 1, beginrev - 1
2679 dist = -1 * dist
2695 dist = -1 * dist
2680
2696
2681 for x in _xrange(beginrev, endrev, dist):
2697 for x in _xrange(beginrev, endrev, dist):
2682 # Old revisions don't support passing int.
2698 # Old revisions don't support passing int.
2683 n = rl.node(x)
2699 n = rl.node(x)
2684 rl.revision(n)
2700 rl.revision(n)
2685
2701
2686 timer, fm = gettimer(ui, opts)
2702 timer, fm = gettimer(ui, opts)
2687 timer(d)
2703 timer(d)
2688 fm.end()
2704 fm.end()
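# Hedged invocation examples for the command above; a negative --startrev is
# interpreted relative to the end of the revlog, as handled earlier in the
# function:
#
#   $ hg perfrevlogrevisions -m --dist 100
#   $ hg perfrevlogrevisions -c --startrev=-1000 --reverse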
2689
2705
2690
2706
2691 @command(
2707 @command(
2692 b'perfrevlogwrite',
2708 b'perfrevlogwrite',
2693 revlogopts
2709 revlogopts
2694 + formatteropts
2710 + formatteropts
2695 + [
2711 + [
2696 (b's', b'startrev', 1000, b'revision to start writing at'),
2712 (b's', b'startrev', 1000, b'revision to start writing at'),
2697 (b'', b'stoprev', -1, b'last revision to write'),
2713 (b'', b'stoprev', -1, b'last revision to write'),
2698 (b'', b'count', 3, b'number of passes to perform'),
2714 (b'', b'count', 3, b'number of passes to perform'),
2699 (b'', b'details', False, b'print timing for every revisions tested'),
2715 (b'', b'details', False, b'print timing for every revisions tested'),
2700 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2716 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2701 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2717 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2702 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2718 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2703 ],
2719 ],
2704 b'-c|-m|FILE',
2720 b'-c|-m|FILE',
2705 )
2721 )
2706 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2722 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2707 """Benchmark writing a series of revisions to a revlog.
2723 """Benchmark writing a series of revisions to a revlog.
2708
2724
2709 Possible source values are:
2725 Possible source values are:
2710 * `full`: add from a full text (default).
2726 * `full`: add from a full text (default).
2711 * `parent-1`: add from a delta to the first parent
2727 * `parent-1`: add from a delta to the first parent
2712 * `parent-2`: add from a delta to the second parent if it exists
2728 * `parent-2`: add from a delta to the second parent if it exists
2713 (use a delta from the first parent otherwise)
2729 (use a delta from the first parent otherwise)
2714 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2730 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2715 * `storage`: add from the existing precomputed deltas
2731 * `storage`: add from the existing precomputed deltas
2716
2732
2717 Note: This performance command measures performance in a custom way. As a
2733 Note: This performance command measures performance in a custom way. As a
2718 result some of the global configuration of the 'perf' command does not
2734 result some of the global configuration of the 'perf' command does not
2719 apply to it:
2735 apply to it:
2720
2736
2721 * ``pre-run``: disabled
2737 * ``pre-run``: disabled
2722
2738
2723 * ``profile-benchmark``: disabled
2739 * ``profile-benchmark``: disabled
2724
2740
2725 * ``run-limits``: disabled, use --count instead
2741 * ``run-limits``: disabled, use --count instead
2726 """
2742 """
2727 opts = _byteskwargs(opts)
2743 opts = _byteskwargs(opts)
2728
2744
2729 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2745 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2730 rllen = getlen(ui)(rl)
2746 rllen = getlen(ui)(rl)
2731 if startrev < 0:
2747 if startrev < 0:
2732 startrev = rllen + startrev
2748 startrev = rllen + startrev
2733 if stoprev < 0:
2749 if stoprev < 0:
2734 stoprev = rllen + stoprev
2750 stoprev = rllen + stoprev
2735
2751
2736 lazydeltabase = opts['lazydeltabase']
2752 lazydeltabase = opts['lazydeltabase']
2737 source = opts['source']
2753 source = opts['source']
2738 clearcaches = opts['clear_caches']
2754 clearcaches = opts['clear_caches']
2739 validsource = (
2755 validsource = (
2740 b'full',
2756 b'full',
2741 b'parent-1',
2757 b'parent-1',
2742 b'parent-2',
2758 b'parent-2',
2743 b'parent-smallest',
2759 b'parent-smallest',
2744 b'storage',
2760 b'storage',
2745 )
2761 )
2746 if source not in validsource:
2762 if source not in validsource:
2747 raise error.Abort('invalid source type: %s' % source)
2763 raise error.Abort('invalid source type: %s' % source)
2748
2764
2749 ### actually gather results
2765 ### actually gather results
2750 count = opts['count']
2766 count = opts['count']
2751 if count <= 0:
2767 if count <= 0:
2752 raise error.Abort('invalid run count: %d' % count)
2768 raise error.Abort('invalid run count: %d' % count)
2753 allresults = []
2769 allresults = []
2754 for c in range(count):
2770 for c in range(count):
2755 timing = _timeonewrite(
2771 timing = _timeonewrite(
2756 ui,
2772 ui,
2757 rl,
2773 rl,
2758 source,
2774 source,
2759 startrev,
2775 startrev,
2760 stoprev,
2776 stoprev,
2761 c + 1,
2777 c + 1,
2762 lazydeltabase=lazydeltabase,
2778 lazydeltabase=lazydeltabase,
2763 clearcaches=clearcaches,
2779 clearcaches=clearcaches,
2764 )
2780 )
2765 allresults.append(timing)
2781 allresults.append(timing)
2766
2782
2767 ### consolidate the results in a single list
2783 ### consolidate the results in a single list
2768 results = []
2784 results = []
2769 for idx, (rev, t) in enumerate(allresults[0]):
2785 for idx, (rev, t) in enumerate(allresults[0]):
2770 ts = [t]
2786 ts = [t]
2771 for other in allresults[1:]:
2787 for other in allresults[1:]:
2772 orev, ot = other[idx]
2788 orev, ot = other[idx]
2773 assert orev == rev
2789 assert orev == rev
2774 ts.append(ot)
2790 ts.append(ot)
2775 results.append((rev, ts))
2791 results.append((rev, ts))
2776 resultcount = len(results)
2792 resultcount = len(results)
2777
2793
2778 ### Compute and display relevant statistics
2794 ### Compute and display relevant statistics
2779
2795
2780 # get a formatter
2796 # get a formatter
2781 fm = ui.formatter(b'perf', opts)
2797 fm = ui.formatter(b'perf', opts)
2782 displayall = ui.configbool(b"perf", b"all-timing", False)
2798 displayall = ui.configbool(b"perf", b"all-timing", False)
2783
2799
2784 # print individual details if requested
2800 # print individual details if requested
2785 if opts['details']:
2801 if opts['details']:
2786 for idx, item in enumerate(results, 1):
2802 for idx, item in enumerate(results, 1):
2787 rev, data = item
2803 rev, data = item
2788 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2804 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2789 formatone(fm, data, title=title, displayall=displayall)
2805 formatone(fm, data, title=title, displayall=displayall)
2790
2806
2791 # sorts results by median time
2807 # sorts results by median time
2792 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2808 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2793 # list of (name, index) to display
2809 # list of (name, index) to display
2794 relevants = [
2810 relevants = [
2795 ("min", 0),
2811 ("min", 0),
2796 ("10%", resultcount * 10 // 100),
2812 ("10%", resultcount * 10 // 100),
2797 ("25%", resultcount * 25 // 100),
2813 ("25%", resultcount * 25 // 100),
2798 ("50%", resultcount * 70 // 100),
2814 ("50%", resultcount * 70 // 100),
2799 ("75%", resultcount * 75 // 100),
2815 ("75%", resultcount * 75 // 100),
2800 ("90%", resultcount * 90 // 100),
2816 ("90%", resultcount * 90 // 100),
2801 ("95%", resultcount * 95 // 100),
2817 ("95%", resultcount * 95 // 100),
2802 ("99%", resultcount * 99 // 100),
2818 ("99%", resultcount * 99 // 100),
2803 ("99.9%", resultcount * 999 // 1000),
2819 ("99.9%", resultcount * 999 // 1000),
2804 ("99.99%", resultcount * 9999 // 10000),
2820 ("99.99%", resultcount * 9999 // 10000),
2805 ("99.999%", resultcount * 99999 // 100000),
2821 ("99.999%", resultcount * 99999 // 100000),
2806 ("max", -1),
2822 ("max", -1),
2807 ]
2823 ]
2808 if not ui.quiet:
2824 if not ui.quiet:
2809 for name, idx in relevants:
2825 for name, idx in relevants:
2810 data = results[idx]
2826 data = results[idx]
2811 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2827 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2812 formatone(fm, data[1], title=title, displayall=displayall)
2828 formatone(fm, data[1], title=title, displayall=displayall)
2813
2829
2814 # XXX summing that many floats will not be very precise; we ignore this fact
2830 # XXX summing that many floats will not be very precise; we ignore this fact
2815 # for now
2831 # for now
2816 totaltime = []
2832 totaltime = []
2817 for item in allresults:
2833 for item in allresults:
2818 totaltime.append(
2834 totaltime.append(
2819 (
2835 (
2820 sum(x[1][0] for x in item),
2836 sum(x[1][0] for x in item),
2821 sum(x[1][1] for x in item),
2837 sum(x[1][1] for x in item),
2822 sum(x[1][2] for x in item),
2838 sum(x[1][2] for x in item),
2823 )
2839 )
2824 )
2840 )
2825 formatone(
2841 formatone(
2826 fm,
2842 fm,
2827 totaltime,
2843 totaltime,
2828 title="total time (%d revs)" % resultcount,
2844 title="total time (%d revs)" % resultcount,
2829 displayall=displayall,
2845 displayall=displayall,
2830 )
2846 )
2831 fm.end()
2847 fm.end()
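# A minimal sketch of how the percentile entries in `relevants` above index
# into the sorted `results` list (the count below is illustrative):
#
#   >>> resultcount = 200
#   >>> [(name, resultcount * pct // 100)
#   ...  for name, pct in [("10%", 10), ("50%", 50), ("99%", 99)]]
#   [('10%', 20), ('50%', 100), ('99%', 198)]
#
# After sorting by median time, index `resultcount * p // 100` is the revision
# at the p-th percentile and index -1 ("max") is the slowest one.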
2832
2848
2833
2849
2834 class _faketr(object):
2850 class _faketr(object):
2835 def add(s, x, y, z=None):
2851 def add(s, x, y, z=None):
2836 return None
2852 return None
2837
2853
2838
2854
2839 def _timeonewrite(
2855 def _timeonewrite(
2840 ui,
2856 ui,
2841 orig,
2857 orig,
2842 source,
2858 source,
2843 startrev,
2859 startrev,
2844 stoprev,
2860 stoprev,
2845 runidx=None,
2861 runidx=None,
2846 lazydeltabase=True,
2862 lazydeltabase=True,
2847 clearcaches=True,
2863 clearcaches=True,
2848 ):
2864 ):
2849 timings = []
2865 timings = []
2850 tr = _faketr()
2866 tr = _faketr()
2851 with _temprevlog(ui, orig, startrev) as dest:
2867 with _temprevlog(ui, orig, startrev) as dest:
2852 dest._lazydeltabase = lazydeltabase
2868 dest._lazydeltabase = lazydeltabase
2853 revs = list(orig.revs(startrev, stoprev))
2869 revs = list(orig.revs(startrev, stoprev))
2854 total = len(revs)
2870 total = len(revs)
2855 topic = 'adding'
2871 topic = 'adding'
2856 if runidx is not None:
2872 if runidx is not None:
2857 topic += ' (run #%d)' % runidx
2873 topic += ' (run #%d)' % runidx
2858 # Support both old and new progress API
2874 # Support both old and new progress API
2859 if util.safehasattr(ui, 'makeprogress'):
2875 if util.safehasattr(ui, 'makeprogress'):
2860 progress = ui.makeprogress(topic, unit='revs', total=total)
2876 progress = ui.makeprogress(topic, unit='revs', total=total)
2861
2877
2862 def updateprogress(pos):
2878 def updateprogress(pos):
2863 progress.update(pos)
2879 progress.update(pos)
2864
2880
2865 def completeprogress():
2881 def completeprogress():
2866 progress.complete()
2882 progress.complete()
2867
2883
2868 else:
2884 else:
2869
2885
2870 def updateprogress(pos):
2886 def updateprogress(pos):
2871 ui.progress(topic, pos, unit='revs', total=total)
2887 ui.progress(topic, pos, unit='revs', total=total)
2872
2888
2873 def completeprogress():
2889 def completeprogress():
2874 ui.progress(topic, None, unit='revs', total=total)
2890 ui.progress(topic, None, unit='revs', total=total)
2875
2891
2876 for idx, rev in enumerate(revs):
2892 for idx, rev in enumerate(revs):
2877 updateprogress(idx)
2893 updateprogress(idx)
2878 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2894 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2879 if clearcaches:
2895 if clearcaches:
2880 dest.index.clearcaches()
2896 dest.index.clearcaches()
2881 dest.clearcaches()
2897 dest.clearcaches()
2882 with timeone() as r:
2898 with timeone() as r:
2883 dest.addrawrevision(*addargs, **addkwargs)
2899 dest.addrawrevision(*addargs, **addkwargs)
2884 timings.append((rev, r[0]))
2900 timings.append((rev, r[0]))
2885 updateprogress(total)
2901 updateprogress(total)
2886 completeprogress()
2902 completeprogress()
2887 return timings
2903 return timings
2888
2904
2889
2905
2890 def _getrevisionseed(orig, rev, tr, source):
2906 def _getrevisionseed(orig, rev, tr, source):
2891 from mercurial.node import nullid
2907 from mercurial.node import nullid
2892
2908
2893 linkrev = orig.linkrev(rev)
2909 linkrev = orig.linkrev(rev)
2894 node = orig.node(rev)
2910 node = orig.node(rev)
2895 p1, p2 = orig.parents(node)
2911 p1, p2 = orig.parents(node)
2896 flags = orig.flags(rev)
2912 flags = orig.flags(rev)
2897 cachedelta = None
2913 cachedelta = None
2898 text = None
2914 text = None
2899
2915
2900 if source == b'full':
2916 if source == b'full':
2901 text = orig.revision(rev)
2917 text = orig.revision(rev)
2902 elif source == b'parent-1':
2918 elif source == b'parent-1':
2903 baserev = orig.rev(p1)
2919 baserev = orig.rev(p1)
2904 cachedelta = (baserev, orig.revdiff(p1, rev))
2920 cachedelta = (baserev, orig.revdiff(p1, rev))
2905 elif source == b'parent-2':
2921 elif source == b'parent-2':
2906 parent = p2
2922 parent = p2
2907 if p2 == nullid:
2923 if p2 == nullid:
2908 parent = p1
2924 parent = p1
2909 baserev = orig.rev(parent)
2925 baserev = orig.rev(parent)
2910 cachedelta = (baserev, orig.revdiff(parent, rev))
2926 cachedelta = (baserev, orig.revdiff(parent, rev))
2911 elif source == b'parent-smallest':
2927 elif source == b'parent-smallest':
2912 p1diff = orig.revdiff(p1, rev)
2928 p1diff = orig.revdiff(p1, rev)
2913 parent = p1
2929 parent = p1
2914 diff = p1diff
2930 diff = p1diff
2915 if p2 != nullid:
2931 if p2 != nullid:
2916 p2diff = orig.revdiff(p2, rev)
2932 p2diff = orig.revdiff(p2, rev)
2917 if len(p1diff) > len(p2diff):
2933 if len(p1diff) > len(p2diff):
2918 parent = p2
2934 parent = p2
2919 diff = p2diff
2935 diff = p2diff
2920 baserev = orig.rev(parent)
2936 baserev = orig.rev(parent)
2921 cachedelta = (baserev, diff)
2937 cachedelta = (baserev, diff)
2922 elif source == b'storage':
2938 elif source == b'storage':
2923 baserev = orig.deltaparent(rev)
2939 baserev = orig.deltaparent(rev)
2924 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2940 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2925
2941
2926 return (
2942 return (
2927 (text, tr, linkrev, p1, p2),
2943 (text, tr, linkrev, p1, p2),
2928 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2944 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2929 )
2945 )
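# The (args, kwargs) pair returned above is consumed by _timeonewrite as
# dest.addrawrevision(*addargs, **addkwargs), i.e. roughly the call below,
# spelled out with the same local names for readability:
#
#   dest.addrawrevision(text, tr, linkrev, p1, p2,
#                       node=node, flags=flags, cachedelta=cachedelta)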
2930
2946
2931
2947
2932 @contextlib.contextmanager
2948 @contextlib.contextmanager
2933 def _temprevlog(ui, orig, truncaterev):
2949 def _temprevlog(ui, orig, truncaterev):
2934 from mercurial import vfs as vfsmod
2950 from mercurial import vfs as vfsmod
2935
2951
2936 if orig._inline:
2952 if orig._inline:
2937 raise error.Abort('not supporting inline revlog (yet)')
2953 raise error.Abort('not supporting inline revlog (yet)')
2938 revlogkwargs = {}
2954 revlogkwargs = {}
2939 k = 'upperboundcomp'
2955 k = 'upperboundcomp'
2940 if util.safehasattr(orig, k):
2956 if util.safehasattr(orig, k):
2941 revlogkwargs[k] = getattr(orig, k)
2957 revlogkwargs[k] = getattr(orig, k)
2942
2958
2943 origindexpath = orig.opener.join(orig.indexfile)
2959 origindexpath = orig.opener.join(orig.indexfile)
2944 origdatapath = orig.opener.join(orig.datafile)
2960 origdatapath = orig.opener.join(orig.datafile)
2945 indexname = 'revlog.i'
2961 indexname = 'revlog.i'
2946 dataname = 'revlog.d'
2962 dataname = 'revlog.d'
2947
2963
2948 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2964 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2949 try:
2965 try:
2950 # copy the data file in a temporary directory
2966 # copy the data file in a temporary directory
2951 ui.debug('copying data in %s\n' % tmpdir)
2967 ui.debug('copying data in %s\n' % tmpdir)
2952 destindexpath = os.path.join(tmpdir, 'revlog.i')
2968 destindexpath = os.path.join(tmpdir, 'revlog.i')
2953 destdatapath = os.path.join(tmpdir, 'revlog.d')
2969 destdatapath = os.path.join(tmpdir, 'revlog.d')
2954 shutil.copyfile(origindexpath, destindexpath)
2970 shutil.copyfile(origindexpath, destindexpath)
2955 shutil.copyfile(origdatapath, destdatapath)
2971 shutil.copyfile(origdatapath, destdatapath)
2956
2972
2957 # remove the data we want to add again
2973 # remove the data we want to add again
2958 ui.debug('truncating data to be rewritten\n')
2974 ui.debug('truncating data to be rewritten\n')
2959 with open(destindexpath, 'ab') as index:
2975 with open(destindexpath, 'ab') as index:
2960 index.seek(0)
2976 index.seek(0)
2961 index.truncate(truncaterev * orig._io.size)
2977 index.truncate(truncaterev * orig._io.size)
2962 with open(destdatapath, 'ab') as data:
2978 with open(destdatapath, 'ab') as data:
2963 data.seek(0)
2979 data.seek(0)
2964 data.truncate(orig.start(truncaterev))
2980 data.truncate(orig.start(truncaterev))
2965
2981
2966 # instantiate a new revlog from the temporary copy
2982 # instantiate a new revlog from the temporary copy
2967 ui.debug('recreating revlog from the truncated copy\n')
2983 ui.debug('recreating revlog from the truncated copy\n')
2968 vfs = vfsmod.vfs(tmpdir)
2984 vfs = vfsmod.vfs(tmpdir)
2969 vfs.options = getattr(orig.opener, 'options', None)
2985 vfs.options = getattr(orig.opener, 'options', None)
2970
2986
2971 dest = revlog.revlog(
2987 dest = revlog.revlog(
2972 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2988 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2973 )
2989 )
2974 if dest._inline:
2990 if dest._inline:
2975 raise error.Abort('not supporting inline revlog (yet)')
2991 raise error.Abort('not supporting inline revlog (yet)')
2976 # make sure internals are initialized
2992 # make sure internals are initialized
2977 dest.revision(len(dest) - 1)
2993 dest.revision(len(dest) - 1)
2978 yield dest
2994 yield dest
2979 del dest, vfs
2995 del dest, vfs
2980 finally:
2996 finally:
2981 shutil.rmtree(tmpdir, True)
2997 shutil.rmtree(tmpdir, True)
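# Usage sketch for the context manager above, mirroring how _timeonewrite
# drives it: every write goes to a throw-away copy under a temporary
# directory, so the original revlog is never modified.
#
#   with _temprevlog(ui, orig, startrev) as dest:
#       dest.addrawrevision(*addargs, **addkwargs)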
2982
2998
2983
2999
2984 @command(
3000 @command(
2985 b'perfrevlogchunks',
3001 b'perfrevlogchunks',
2986 revlogopts
3002 revlogopts
2987 + formatteropts
3003 + formatteropts
2988 + [
3004 + [
2989 (b'e', b'engines', b'', b'compression engines to use'),
3005 (b'e', b'engines', b'', b'compression engines to use'),
2990 (b's', b'startrev', 0, b'revision to start at'),
3006 (b's', b'startrev', 0, b'revision to start at'),
2991 ],
3007 ],
2992 b'-c|-m|FILE',
3008 b'-c|-m|FILE',
2993 )
3009 )
2994 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3010 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2995 """Benchmark operations on revlog chunks.
3011 """Benchmark operations on revlog chunks.
2996
3012
2997 Logically, each revlog is a collection of fulltext revisions. However,
3013 Logically, each revlog is a collection of fulltext revisions. However,
2998 stored within each revlog are "chunks" of possibly compressed data. This
3014 stored within each revlog are "chunks" of possibly compressed data. This
2999 data needs to be read and decompressed or compressed and written.
3015 data needs to be read and decompressed or compressed and written.
3000
3016
3001 This command measures the time it takes to read+decompress and recompress
3017 This command measures the time it takes to read+decompress and recompress
3002 chunks in a revlog. It effectively isolates I/O and compression performance.
3018 chunks in a revlog. It effectively isolates I/O and compression performance.
3003 For measurements of higher-level operations like resolving revisions,
3019 For measurements of higher-level operations like resolving revisions,
3004 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3020 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3005 """
3021 """
3006 opts = _byteskwargs(opts)
3022 opts = _byteskwargs(opts)
3007
3023
3008 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3024 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3009
3025
3010 # _chunkraw was renamed to _getsegmentforrevs.
3026 # _chunkraw was renamed to _getsegmentforrevs.
3011 try:
3027 try:
3012 segmentforrevs = rl._getsegmentforrevs
3028 segmentforrevs = rl._getsegmentforrevs
3013 except AttributeError:
3029 except AttributeError:
3014 segmentforrevs = rl._chunkraw
3030 segmentforrevs = rl._chunkraw
3015
3031
3016 # Verify engines argument.
3032 # Verify engines argument.
3017 if engines:
3033 if engines:
3018 engines = set(e.strip() for e in engines.split(b','))
3034 engines = set(e.strip() for e in engines.split(b','))
3019 for engine in engines:
3035 for engine in engines:
3020 try:
3036 try:
3021 util.compressionengines[engine]
3037 util.compressionengines[engine]
3022 except KeyError:
3038 except KeyError:
3023 raise error.Abort(b'unknown compression engine: %s' % engine)
3039 raise error.Abort(b'unknown compression engine: %s' % engine)
3024 else:
3040 else:
3025 engines = []
3041 engines = []
3026 for e in util.compengines:
3042 for e in util.compengines:
3027 engine = util.compengines[e]
3043 engine = util.compengines[e]
3028 try:
3044 try:
3029 if engine.available():
3045 if engine.available():
3030 engine.revlogcompressor().compress(b'dummy')
3046 engine.revlogcompressor().compress(b'dummy')
3031 engines.append(e)
3047 engines.append(e)
3032 except NotImplementedError:
3048 except NotImplementedError:
3033 pass
3049 pass
3034
3050
3035 revs = list(rl.revs(startrev, len(rl) - 1))
3051 revs = list(rl.revs(startrev, len(rl) - 1))
3036
3052
3037 def rlfh(rl):
3053 def rlfh(rl):
3038 if rl._inline:
3054 if rl._inline:
3039 return getsvfs(repo)(rl.indexfile)
3055 return getsvfs(repo)(rl.indexfile)
3040 else:
3056 else:
3041 return getsvfs(repo)(rl.datafile)
3057 return getsvfs(repo)(rl.datafile)
3042
3058
3043 def doread():
3059 def doread():
3044 rl.clearcaches()
3060 rl.clearcaches()
3045 for rev in revs:
3061 for rev in revs:
3046 segmentforrevs(rev, rev)
3062 segmentforrevs(rev, rev)
3047
3063
3048 def doreadcachedfh():
3064 def doreadcachedfh():
3049 rl.clearcaches()
3065 rl.clearcaches()
3050 fh = rlfh(rl)
3066 fh = rlfh(rl)
3051 for rev in revs:
3067 for rev in revs:
3052 segmentforrevs(rev, rev, df=fh)
3068 segmentforrevs(rev, rev, df=fh)
3053
3069
3054 def doreadbatch():
3070 def doreadbatch():
3055 rl.clearcaches()
3071 rl.clearcaches()
3056 segmentforrevs(revs[0], revs[-1])
3072 segmentforrevs(revs[0], revs[-1])
3057
3073
3058 def doreadbatchcachedfh():
3074 def doreadbatchcachedfh():
3059 rl.clearcaches()
3075 rl.clearcaches()
3060 fh = rlfh(rl)
3076 fh = rlfh(rl)
3061 segmentforrevs(revs[0], revs[-1], df=fh)
3077 segmentforrevs(revs[0], revs[-1], df=fh)
3062
3078
3063 def dochunk():
3079 def dochunk():
3064 rl.clearcaches()
3080 rl.clearcaches()
3065 fh = rlfh(rl)
3081 fh = rlfh(rl)
3066 for rev in revs:
3082 for rev in revs:
3067 rl._chunk(rev, df=fh)
3083 rl._chunk(rev, df=fh)
3068
3084
3069 chunks = [None]
3085 chunks = [None]
3070
3086
3071 def dochunkbatch():
3087 def dochunkbatch():
3072 rl.clearcaches()
3088 rl.clearcaches()
3073 fh = rlfh(rl)
3089 fh = rlfh(rl)
3074 # Save chunks as a side-effect.
3090 # Save chunks as a side-effect.
3075 chunks[0] = rl._chunks(revs, df=fh)
3091 chunks[0] = rl._chunks(revs, df=fh)
3076
3092
3077 def docompress(compressor):
3093 def docompress(compressor):
3078 rl.clearcaches()
3094 rl.clearcaches()
3079
3095
3080 try:
3096 try:
3081 # Swap in the requested compression engine.
3097 # Swap in the requested compression engine.
3082 oldcompressor = rl._compressor
3098 oldcompressor = rl._compressor
3083 rl._compressor = compressor
3099 rl._compressor = compressor
3084 for chunk in chunks[0]:
3100 for chunk in chunks[0]:
3085 rl.compress(chunk)
3101 rl.compress(chunk)
3086 finally:
3102 finally:
3087 rl._compressor = oldcompressor
3103 rl._compressor = oldcompressor
3088
3104
3089 benches = [
3105 benches = [
3090 (lambda: doread(), b'read'),
3106 (lambda: doread(), b'read'),
3091 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3107 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3092 (lambda: doreadbatch(), b'read batch'),
3108 (lambda: doreadbatch(), b'read batch'),
3093 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3109 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3094 (lambda: dochunk(), b'chunk'),
3110 (lambda: dochunk(), b'chunk'),
3095 (lambda: dochunkbatch(), b'chunk batch'),
3111 (lambda: dochunkbatch(), b'chunk batch'),
3096 ]
3112 ]
3097
3113
3098 for engine in sorted(engines):
3114 for engine in sorted(engines):
3099 compressor = util.compengines[engine].revlogcompressor()
3115 compressor = util.compengines[engine].revlogcompressor()
3100 benches.append(
3116 benches.append(
3101 (
3117 (
3102 functools.partial(docompress, compressor),
3118 functools.partial(docompress, compressor),
3103 b'compress w/ %s' % engine,
3119 b'compress w/ %s' % engine,
3104 )
3120 )
3105 )
3121 )
3106
3122
3107 for fn, title in benches:
3123 for fn, title in benches:
3108 timer, fm = gettimer(ui, opts)
3124 timer, fm = gettimer(ui, opts)
3109 timer(fn, title=title)
3125 timer(fn, title=title)
3110 fm.end()
3126 fm.end()
3111
3127
3112
3128
3113 @command(
3129 @command(
3114 b'perfrevlogrevision',
3130 b'perfrevlogrevision',
3115 revlogopts
3131 revlogopts
3116 + formatteropts
3132 + formatteropts
3117 + [(b'', b'cache', False, b'use caches instead of clearing')],
3133 + [(b'', b'cache', False, b'use caches instead of clearing')],
3118 b'-c|-m|FILE REV',
3134 b'-c|-m|FILE REV',
3119 )
3135 )
3120 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3136 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3121 """Benchmark obtaining a revlog revision.
3137 """Benchmark obtaining a revlog revision.
3122
3138
3123 Obtaining a revlog revision consists of roughly the following steps:
3139 Obtaining a revlog revision consists of roughly the following steps:
3124
3140
3125 1. Compute the delta chain
3141 1. Compute the delta chain
3126 2. Slice the delta chain if applicable
3142 2. Slice the delta chain if applicable
3127 3. Obtain the raw chunks for that delta chain
3143 3. Obtain the raw chunks for that delta chain
3128 4. Decompress each raw chunk
3144 4. Decompress each raw chunk
3129 5. Apply binary patches to obtain fulltext
3145 5. Apply binary patches to obtain fulltext
3130 6. Verify hash of fulltext
3146 6. Verify hash of fulltext
3131
3147
3132 This command measures the time spent in each of these phases.
3148 This command measures the time spent in each of these phases.
3133 """
3149 """
3134 opts = _byteskwargs(opts)
3150 opts = _byteskwargs(opts)
3135
3151
3136 if opts.get(b'changelog') or opts.get(b'manifest'):
3152 if opts.get(b'changelog') or opts.get(b'manifest'):
3137 file_, rev = None, file_
3153 file_, rev = None, file_
3138 elif rev is None:
3154 elif rev is None:
3139 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3155 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3140
3156
3141 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3157 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3142
3158
3143 # _chunkraw was renamed to _getsegmentforrevs.
3159 # _chunkraw was renamed to _getsegmentforrevs.
3144 try:
3160 try:
3145 segmentforrevs = r._getsegmentforrevs
3161 segmentforrevs = r._getsegmentforrevs
3146 except AttributeError:
3162 except AttributeError:
3147 segmentforrevs = r._chunkraw
3163 segmentforrevs = r._chunkraw
3148
3164
3149 node = r.lookup(rev)
3165 node = r.lookup(rev)
3150 rev = r.rev(node)
3166 rev = r.rev(node)
3151
3167
3152 def getrawchunks(data, chain):
3168 def getrawchunks(data, chain):
3153 start = r.start
3169 start = r.start
3154 length = r.length
3170 length = r.length
3155 inline = r._inline
3171 inline = r._inline
3156 iosize = r._io.size
3172 iosize = r._io.size
3157 buffer = util.buffer
3173 buffer = util.buffer
3158
3174
3159 chunks = []
3175 chunks = []
3160 ladd = chunks.append
3176 ladd = chunks.append
3161 for idx, item in enumerate(chain):
3177 for idx, item in enumerate(chain):
3162 offset = start(item[0])
3178 offset = start(item[0])
3163 bits = data[idx]
3179 bits = data[idx]
3164 for rev in item:
3180 for rev in item:
3165 chunkstart = start(rev)
3181 chunkstart = start(rev)
3166 if inline:
3182 if inline:
3167 chunkstart += (rev + 1) * iosize
3183 chunkstart += (rev + 1) * iosize
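# (inline revlogs interleave index entries with data, so the chunk for
# `rev` starts after rev + 1 index entries of `iosize` bytes each)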
3168 chunklength = length(rev)
3184 chunklength = length(rev)
3169 ladd(buffer(bits, chunkstart - offset, chunklength))
3185 ladd(buffer(bits, chunkstart - offset, chunklength))
3170
3186
3171 return chunks
3187 return chunks
3172
3188
3173 def dodeltachain(rev):
3189 def dodeltachain(rev):
3174 if not cache:
3190 if not cache:
3175 r.clearcaches()
3191 r.clearcaches()
3176 r._deltachain(rev)
3192 r._deltachain(rev)
3177
3193
3178 def doread(chain):
3194 def doread(chain):
3179 if not cache:
3195 if not cache:
3180 r.clearcaches()
3196 r.clearcaches()
3181 for item in slicedchain:
3197 for item in slicedchain:
3182 segmentforrevs(item[0], item[-1])
3198 segmentforrevs(item[0], item[-1])
3183
3199
3184 def doslice(r, chain, size):
3200 def doslice(r, chain, size):
3185 for s in slicechunk(r, chain, targetsize=size):
3201 for s in slicechunk(r, chain, targetsize=size):
3186 pass
3202 pass
3187
3203
3188 def dorawchunks(data, chain):
3204 def dorawchunks(data, chain):
3189 if not cache:
3205 if not cache:
3190 r.clearcaches()
3206 r.clearcaches()
3191 getrawchunks(data, chain)
3207 getrawchunks(data, chain)
3192
3208
3193 def dodecompress(chunks):
3209 def dodecompress(chunks):
3194 decomp = r.decompress
3210 decomp = r.decompress
3195 for chunk in chunks:
3211 for chunk in chunks:
3196 decomp(chunk)
3212 decomp(chunk)
3197
3213
3198 def dopatch(text, bins):
3214 def dopatch(text, bins):
3199 if not cache:
3215 if not cache:
3200 r.clearcaches()
3216 r.clearcaches()
3201 mdiff.patches(text, bins)
3217 mdiff.patches(text, bins)
3202
3218
3203 def dohash(text):
3219 def dohash(text):
3204 if not cache:
3220 if not cache:
3205 r.clearcaches()
3221 r.clearcaches()
3206 r.checkhash(text, node, rev=rev)
3222 r.checkhash(text, node, rev=rev)
3207
3223
3208 def dorevision():
3224 def dorevision():
3209 if not cache:
3225 if not cache:
3210 r.clearcaches()
3226 r.clearcaches()
3211 r.revision(node)
3227 r.revision(node)
3212
3228
3213 try:
3229 try:
3214 from mercurial.revlogutils.deltas import slicechunk
3230 from mercurial.revlogutils.deltas import slicechunk
3215 except ImportError:
3231 except ImportError:
3216 slicechunk = getattr(revlog, '_slicechunk', None)
3232 slicechunk = getattr(revlog, '_slicechunk', None)
3217
3233
3218 size = r.length(rev)
3234 size = r.length(rev)
3219 chain = r._deltachain(rev)[0]
3235 chain = r._deltachain(rev)[0]
3220 if not getattr(r, '_withsparseread', False):
3236 if not getattr(r, '_withsparseread', False):
3221 slicedchain = (chain,)
3237 slicedchain = (chain,)
3222 else:
3238 else:
3223 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3239 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3224 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3240 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3225 rawchunks = getrawchunks(data, slicedchain)
3241 rawchunks = getrawchunks(data, slicedchain)
3226 bins = r._chunks(chain)
3242 bins = r._chunks(chain)
3227 text = bytes(bins[0])
3243 text = bytes(bins[0])
3228 bins = bins[1:]
3244 bins = bins[1:]
3229 text = mdiff.patches(text, bins)
3245 text = mdiff.patches(text, bins)
3230
3246
3231 benches = [
3247 benches = [
3232 (lambda: dorevision(), b'full'),
3248 (lambda: dorevision(), b'full'),
3233 (lambda: dodeltachain(rev), b'deltachain'),
3249 (lambda: dodeltachain(rev), b'deltachain'),
3234 (lambda: doread(chain), b'read'),
3250 (lambda: doread(chain), b'read'),
3235 ]
3251 ]
3236
3252
3237 if getattr(r, '_withsparseread', False):
3253 if getattr(r, '_withsparseread', False):
3238 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3254 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3239 benches.append(slicing)
3255 benches.append(slicing)
3240
3256
3241 benches.extend(
3257 benches.extend(
3242 [
3258 [
3243 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3259 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3244 (lambda: dodecompress(rawchunks), b'decompress'),
3260 (lambda: dodecompress(rawchunks), b'decompress'),
3245 (lambda: dopatch(text, bins), b'patch'),
3261 (lambda: dopatch(text, bins), b'patch'),
3246 (lambda: dohash(text), b'hash'),
3262 (lambda: dohash(text), b'hash'),
3247 ]
3263 ]
3248 )
3264 )
3249
3265
3250 timer, fm = gettimer(ui, opts)
3266 timer, fm = gettimer(ui, opts)
3251 for fn, title in benches:
3267 for fn, title in benches:
3252 timer(fn, title=title)
3268 timer(fn, title=title)
3253 fm.end()
3269 fm.end()
3254
3270
3255
3271
3256 @command(
3272 @command(
3257 b'perfrevset',
3273 b'perfrevset',
3258 [
3274 [
3259 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3275 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3260 (b'', b'contexts', False, b'obtain changectx for each revision'),
3276 (b'', b'contexts', False, b'obtain changectx for each revision'),
3261 ]
3277 ]
3262 + formatteropts,
3278 + formatteropts,
3263 b"REVSET",
3279 b"REVSET",
3264 )
3280 )
3265 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3281 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3266 """benchmark the execution time of a revset
3282 """benchmark the execution time of a revset
3267
3283
3268 Use the --clear option if you need to evaluate the impact of building the
3284 Use the --clear option if you need to evaluate the impact of building the
3269 volatile revision set caches on the revset execution. Volatile caches hold
3285 volatile revision set caches on the revset execution. Volatile caches hold
3270 filtered and obsolescence related data."""
3286 filtered and obsolescence related data."""
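# Example invocations (illustrative):
#   $ hg perfrevset 'all()'
#   $ hg perfrevset 'heads(all())' --contexts --clear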
3271 opts = _byteskwargs(opts)
3287 opts = _byteskwargs(opts)
3272
3288
3273 timer, fm = gettimer(ui, opts)
3289 timer, fm = gettimer(ui, opts)
3274
3290
3275 def d():
3291 def d():
3276 if clear:
3292 if clear:
3277 repo.invalidatevolatilesets()
3293 repo.invalidatevolatilesets()
3278 if contexts:
3294 if contexts:
3279 for ctx in repo.set(expr):
3295 for ctx in repo.set(expr):
3280 pass
3296 pass
3281 else:
3297 else:
3282 for r in repo.revs(expr):
3298 for r in repo.revs(expr):
3283 pass
3299 pass
3284
3300
3285 timer(d)
3301 timer(d)
3286 fm.end()
3302 fm.end()
3287
3303
3288
3304
3289 @command(
3305 @command(
3290 b'perfvolatilesets',
3306 b'perfvolatilesets',
3291 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3307 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3292 + formatteropts,
3308 + formatteropts,
3293 )
3309 )
3294 def perfvolatilesets(ui, repo, *names, **opts):
3310 def perfvolatilesets(ui, repo, *names, **opts):
3295 """benchmark the computation of various volatile set
3311 """benchmark the computation of various volatile set
3296
3312
3297 Volatile sets compute elements related to filtering and obsolescence."""
3313 Volatile sets compute elements related to filtering and obsolescence."""
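# Two families of sets are timed: the obsolescence-related sets registered
# in obsolete.cachefuncs and the filtered-revision sets from
# repoview.filtertable, optionally restricted to the names passed in.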
3298 opts = _byteskwargs(opts)
3314 opts = _byteskwargs(opts)
3299 timer, fm = gettimer(ui, opts)
3315 timer, fm = gettimer(ui, opts)
3300 repo = repo.unfiltered()
3316 repo = repo.unfiltered()
3301
3317
3302 def getobs(name):
3318 def getobs(name):
3303 def d():
3319 def d():
3304 repo.invalidatevolatilesets()
3320 repo.invalidatevolatilesets()
3305 if opts[b'clear_obsstore']:
3321 if opts[b'clear_obsstore']:
3306 clearfilecache(repo, b'obsstore')
3322 clearfilecache(repo, b'obsstore')
3307 obsolete.getrevs(repo, name)
3323 obsolete.getrevs(repo, name)
3308
3324
3309 return d
3325 return d
3310
3326
3311 allobs = sorted(obsolete.cachefuncs)
3327 allobs = sorted(obsolete.cachefuncs)
3312 if names:
3328 if names:
3313 allobs = [n for n in allobs if n in names]
3329 allobs = [n for n in allobs if n in names]
3314
3330
3315 for name in allobs:
3331 for name in allobs:
3316 timer(getobs(name), title=name)
3332 timer(getobs(name), title=name)
3317
3333
3318 def getfiltered(name):
3334 def getfiltered(name):
3319 def d():
3335 def d():
3320 repo.invalidatevolatilesets()
3336 repo.invalidatevolatilesets()
3321 if opts[b'clear_obsstore']:
3337 if opts[b'clear_obsstore']:
3322 clearfilecache(repo, b'obsstore')
3338 clearfilecache(repo, b'obsstore')
3323 repoview.filterrevs(repo, name)
3339 repoview.filterrevs(repo, name)
3324
3340
3325 return d
3341 return d
3326
3342
3327 allfilter = sorted(repoview.filtertable)
3343 allfilter = sorted(repoview.filtertable)
3328 if names:
3344 if names:
3329 allfilter = [n for n in allfilter if n in names]
3345 allfilter = [n for n in allfilter if n in names]
3330
3346
3331 for name in allfilter:
3347 for name in allfilter:
3332 timer(getfiltered(name), title=name)
3348 timer(getfiltered(name), title=name)
3333 fm.end()
3349 fm.end()
3334
3350
3335
3351
3336 @command(
3352 @command(
3337 b'perfbranchmap',
3353 b'perfbranchmap',
3338 [
3354 [
3339 (b'f', b'full', False, b'Includes build time of subset'),
3355 (b'f', b'full', False, b'Includes build time of subset'),
3340 (
3356 (
3341 b'',
3357 b'',
3342 b'clear-revbranch',
3358 b'clear-revbranch',
3343 False,
3359 False,
3344 b'purge the revbranch cache between computation',
3360 b'purge the revbranch cache between computation',
3345 ),
3361 ),
3346 ]
3362 ]
3347 + formatteropts,
3363 + formatteropts,
3348 )
3364 )
3349 def perfbranchmap(ui, repo, *filternames, **opts):
3365 def perfbranchmap(ui, repo, *filternames, **opts):
3350 """benchmark the update of a branchmap
3366 """benchmark the update of a branchmap
3351
3367
3352 This benchmarks the full repo.branchmap() call with read and write disabled
3368 This benchmarks the full repo.branchmap() call with read and write disabled
3353 """
3369 """
3354 opts = _byteskwargs(opts)
3370 opts = _byteskwargs(opts)
3355 full = opts.get(b"full", False)
3371 full = opts.get(b"full", False)
3356 clear_revbranch = opts.get(b"clear_revbranch", False)
3372 clear_revbranch = opts.get(b"clear_revbranch", False)
3357 timer, fm = gettimer(ui, opts)
3373 timer, fm = gettimer(ui, opts)
3358
3374
3359 def getbranchmap(filtername):
3375 def getbranchmap(filtername):
3360 """generate a benchmark function for the filtername"""
3376 """generate a benchmark function for the filtername"""
3361 if filtername is None:
3377 if filtername is None:
3362 view = repo
3378 view = repo
3363 else:
3379 else:
3364 view = repo.filtered(filtername)
3380 view = repo.filtered(filtername)
3365 if util.safehasattr(view._branchcaches, '_per_filter'):
3381 if util.safehasattr(view._branchcaches, '_per_filter'):
3366 filtered = view._branchcaches._per_filter
3382 filtered = view._branchcaches._per_filter
3367 else:
3383 else:
3368 # older versions
3384 # older versions
3369 filtered = view._branchcaches
3385 filtered = view._branchcaches
3370
3386
3371 def d():
3387 def d():
3372 if clear_revbranch:
3388 if clear_revbranch:
3373 repo.revbranchcache()._clear()
3389 repo.revbranchcache()._clear()
3374 if full:
3390 if full:
3375 view._branchcaches.clear()
3391 view._branchcaches.clear()
3376 else:
3392 else:
3377 filtered.pop(filtername, None)
3393 filtered.pop(filtername, None)
3378 view.branchmap()
3394 view.branchmap()
3379
3395
3380 return d
3396 return d
3381
3397
3382 # add filter in smaller subset to bigger subset
3398 # add filter in smaller subset to bigger subset
3383 possiblefilters = set(repoview.filtertable)
3399 possiblefilters = set(repoview.filtertable)
3384 if filternames:
3400 if filternames:
3385 possiblefilters &= set(filternames)
3401 possiblefilters &= set(filternames)
3386 subsettable = getbranchmapsubsettable()
3402 subsettable = getbranchmapsubsettable()
3387 allfilters = []
3403 allfilters = []
3388 while possiblefilters:
3404 while possiblefilters:
3389 for name in possiblefilters:
3405 for name in possiblefilters:
3390 subset = subsettable.get(name)
3406 subset = subsettable.get(name)
3391 if subset not in possiblefilters:
3407 if subset not in possiblefilters:
3392 break
3408 break
3393 else:
3409 else:
3394 assert False, b'subset cycle %s!' % possiblefilters
3410 assert False, b'subset cycle %s!' % possiblefilters
3395 allfilters.append(name)
3411 allfilters.append(name)
3396 possiblefilters.remove(name)
3412 possiblefilters.remove(name)
3397
3413
3398 # warm the cache
3414 # warm the cache
3399 if not full:
3415 if not full:
3400 for name in allfilters:
3416 for name in allfilters:
3401 repo.filtered(name).branchmap()
3417 repo.filtered(name).branchmap()
3402 if not filternames or b'unfiltered' in filternames:
3418 if not filternames or b'unfiltered' in filternames:
3403 # add unfiltered
3419 # add unfiltered
3404 allfilters.append(None)
3420 allfilters.append(None)
3405
3421
3406 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3422 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3407 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3423 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3408 branchcacheread.set(classmethod(lambda *args: None))
3424 branchcacheread.set(classmethod(lambda *args: None))
3409 else:
3425 else:
3410 # older versions
3426 # older versions
3411 branchcacheread = safeattrsetter(branchmap, b'read')
3427 branchcacheread = safeattrsetter(branchmap, b'read')
3412 branchcacheread.set(lambda *args: None)
3428 branchcacheread.set(lambda *args: None)
3413 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3429 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3414 branchcachewrite.set(lambda *args: None)
3430 branchcachewrite.set(lambda *args: None)
3415 try:
3431 try:
3416 for name in allfilters:
3432 for name in allfilters:
3417 printname = name
3433 printname = name
3418 if name is None:
3434 if name is None:
3419 printname = b'unfiltered'
3435 printname = b'unfiltered'
3420 timer(getbranchmap(name), title=str(printname))
3436 timer(getbranchmap(name), title=str(printname))
3421 finally:
3437 finally:
3422 branchcacheread.restore()
3438 branchcacheread.restore()
3423 branchcachewrite.restore()
3439 branchcachewrite.restore()
3424 fm.end()
3440 fm.end()
3425
3441
3426
3442
3427 @command(
3443 @command(
3428 b'perfbranchmapupdate',
3444 b'perfbranchmapupdate',
3429 [
3445 [
3430 (b'', b'base', [], b'subset of revision to start from'),
3446 (b'', b'base', [], b'subset of revision to start from'),
3431 (b'', b'target', [], b'subset of revision to end with'),
3447 (b'', b'target', [], b'subset of revision to end with'),
3432 (b'', b'clear-caches', False, b'clear caches between each run'),
3448 (b'', b'clear-caches', False, b'clear caches between each run'),
3433 ]
3449 ]
3434 + formatteropts,
3450 + formatteropts,
3435 )
3451 )
3436 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3452 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3437 """benchmark branchmap update from for <base> revs to <target> revs
3453 """benchmark branchmap update from for <base> revs to <target> revs
3438
3454
3439 If `--clear-caches` is passed, the following items will be reset before
3455 If `--clear-caches` is passed, the following items will be reset before
3440 each update:
3456 each update:
3441 * the changelog instance and associated indexes
3457 * the changelog instance and associated indexes
3442 * the rev-branch-cache instance
3458 * the rev-branch-cache instance
3443
3459
3444 Examples:
3460 Examples:
3445
3461
3446 # update for the one last revision
3462 # update for the one last revision
3447 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3463 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3448
3464
3449 # update for change coming with a new branch
3465 # update for change coming with a new branch
3450 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3466 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3451 """
3467 """
3452 from mercurial import branchmap
3468 from mercurial import branchmap
3453 from mercurial import repoview
3469 from mercurial import repoview
3454
3470
3455 opts = _byteskwargs(opts)
3471 opts = _byteskwargs(opts)
3456 timer, fm = gettimer(ui, opts)
3472 timer, fm = gettimer(ui, opts)
3457 clearcaches = opts[b'clear_caches']
3473 clearcaches = opts[b'clear_caches']
3458 unfi = repo.unfiltered()
3474 unfi = repo.unfiltered()
3459 x = [None] # used to pass data between closure
3475 x = [None] # used to pass data between closure
3460
3476
3461 # we use a `list` here to avoid possible side effect from smartset
3477 # we use a `list` here to avoid possible side effect from smartset
3462 baserevs = list(scmutil.revrange(repo, base))
3478 baserevs = list(scmutil.revrange(repo, base))
3463 targetrevs = list(scmutil.revrange(repo, target))
3479 targetrevs = list(scmutil.revrange(repo, target))
3464 if not baserevs:
3480 if not baserevs:
3465 raise error.Abort(b'no revisions selected for --base')
3481 raise error.Abort(b'no revisions selected for --base')
3466 if not targetrevs:
3482 if not targetrevs:
3467 raise error.Abort(b'no revisions selected for --target')
3483 raise error.Abort(b'no revisions selected for --target')
3468
3484
3469 # make sure the target revisions also include all the base revisions
3485 # make sure the target revisions also include all the base revisions
3470 targetrevs = list(set(baserevs) | set(targetrevs))
3486 targetrevs = list(set(baserevs) | set(targetrevs))
3471 targetrevs.sort()
3487 targetrevs.sort()
3472
3488
3473 cl = repo.changelog
3489 cl = repo.changelog
3474 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3490 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3475 allbaserevs.sort()
3491 allbaserevs.sort()
3476 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3492 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3477
3493
3478 newrevs = list(alltargetrevs.difference(allbaserevs))
3494 newrevs = list(alltargetrevs.difference(allbaserevs))
3479 newrevs.sort()
3495 newrevs.sort()
3480
3496
3481 allrevs = frozenset(unfi.changelog.revs())
3497 allrevs = frozenset(unfi.changelog.revs())
3482 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3498 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3483 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3499 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3484
3500
3485 def basefilter(repo, visibilityexceptions=None):
3501 def basefilter(repo, visibilityexceptions=None):
3486 return basefilterrevs
3502 return basefilterrevs
3487
3503
3488 def targetfilter(repo, visibilityexceptions=None):
3504 def targetfilter(repo, visibilityexceptions=None):
3489 return targetfilterrevs
3505 return targetfilterrevs
3490
3506
3491 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3507 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3492 ui.status(msg % (len(allbaserevs), len(newrevs)))
3508 ui.status(msg % (len(allbaserevs), len(newrevs)))
3493 if targetfilterrevs:
3509 if targetfilterrevs:
3494 msg = b'(%d revisions still filtered)\n'
3510 msg = b'(%d revisions still filtered)\n'
3495 ui.status(msg % len(targetfilterrevs))
3511 ui.status(msg % len(targetfilterrevs))
3496
3512
3497 try:
3513 try:
3498 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3514 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3499 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3515 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3500
3516
3501 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3517 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3502 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3518 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3503
3519
3504 # try to find an existing branchmap to reuse
3520 # try to find an existing branchmap to reuse
3505 subsettable = getbranchmapsubsettable()
3521 subsettable = getbranchmapsubsettable()
3506 candidatefilter = subsettable.get(None)
3522 candidatefilter = subsettable.get(None)
3507 while candidatefilter is not None:
3523 while candidatefilter is not None:
3508 candidatebm = repo.filtered(candidatefilter).branchmap()
3524 candidatebm = repo.filtered(candidatefilter).branchmap()
3509 if candidatebm.validfor(baserepo):
3525 if candidatebm.validfor(baserepo):
3510 filtered = repoview.filterrevs(repo, candidatefilter)
3526 filtered = repoview.filterrevs(repo, candidatefilter)
3511 missing = [r for r in allbaserevs if r in filtered]
3527 missing = [r for r in allbaserevs if r in filtered]
3512 base = candidatebm.copy()
3528 base = candidatebm.copy()
3513 base.update(baserepo, missing)
3529 base.update(baserepo, missing)
3514 break
3530 break
3515 candidatefilter = subsettable.get(candidatefilter)
3531 candidatefilter = subsettable.get(candidatefilter)
3516 else:
3532 else:
3517 # no suitable subset was found
3533 # no suitable subset was found
3518 base = branchmap.branchcache()
3534 base = branchmap.branchcache()
3519 base.update(baserepo, allbaserevs)
3535 base.update(baserepo, allbaserevs)
3520
3536
3521 def setup():
3537 def setup():
3522 x[0] = base.copy()
3538 x[0] = base.copy()
3523 if clearcaches:
3539 if clearcaches:
3524 unfi._revbranchcache = None
3540 unfi._revbranchcache = None
3525 clearchangelog(repo)
3541 clearchangelog(repo)
3526
3542
3527 def bench():
3543 def bench():
3528 x[0].update(targetrepo, newrevs)
3544 x[0].update(targetrepo, newrevs)
3529
3545
3530 timer(bench, setup=setup)
3546 timer(bench, setup=setup)
3531 fm.end()
3547 fm.end()
3532 finally:
3548 finally:
3533 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3549 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3534 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3550 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3535
3551
3536
3552
3537 @command(
3553 @command(
3538 b'perfbranchmapload',
3554 b'perfbranchmapload',
3539 [
3555 [
3540 (b'f', b'filter', b'', b'Specify repoview filter'),
3556 (b'f', b'filter', b'', b'Specify repoview filter'),
3541 (b'', b'list', False, b'List branchmap filter caches'),
3557 (b'', b'list', False, b'List branchmap filter caches'),
3542 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3558 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3543 ]
3559 ]
3544 + formatteropts,
3560 + formatteropts,
3545 )
3561 )
3546 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3562 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3547 """benchmark reading the branchmap"""
3563 """benchmark reading the branchmap"""
3548 opts = _byteskwargs(opts)
3564 opts = _byteskwargs(opts)
3549 clearrevlogs = opts[b'clear_revlogs']
3565 clearrevlogs = opts[b'clear_revlogs']
3550
3566
3551 if list:
3567 if list:
3552 for name, kind, st in repo.cachevfs.readdir(stat=True):
3568 for name, kind, st in repo.cachevfs.readdir(stat=True):
3553 if name.startswith(b'branch2'):
3569 if name.startswith(b'branch2'):
3554 filtername = name.partition(b'-')[2] or b'unfiltered'
3570 filtername = name.partition(b'-')[2] or b'unfiltered'
3555 ui.status(
3571 ui.status(
3556 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3572 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3557 )
3573 )
3558 return
3574 return
3559 if not filter:
3575 if not filter:
3560 filter = None
3576 filter = None
3561 subsettable = getbranchmapsubsettable()
3577 subsettable = getbranchmapsubsettable()
3562 if filter is None:
3578 if filter is None:
3563 repo = repo.unfiltered()
3579 repo = repo.unfiltered()
3564 else:
3580 else:
3565 repo = repoview.repoview(repo, filter)
3581 repo = repoview.repoview(repo, filter)
3566
3582
3567 repo.branchmap() # make sure we have a relevant, up to date branchmap
3583 repo.branchmap() # make sure we have a relevant, up to date branchmap
3568
3584
3569 try:
3585 try:
3570 fromfile = branchmap.branchcache.fromfile
3586 fromfile = branchmap.branchcache.fromfile
3571 except AttributeError:
3587 except AttributeError:
3572 # older versions
3588 # older versions
3573 fromfile = branchmap.read
3589 fromfile = branchmap.read
3574
3590
3575 currentfilter = filter
3591 currentfilter = filter
3576 # try once without timer, the filter may not be cached
3592 # try once without timer, the filter may not be cached
3577 while fromfile(repo) is None:
3593 while fromfile(repo) is None:
3578 currentfilter = subsettable.get(currentfilter)
3594 currentfilter = subsettable.get(currentfilter)
3579 if currentfilter is None:
3595 if currentfilter is None:
3580 raise error.Abort(
3596 raise error.Abort(
3581 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3597 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3582 )
3598 )
3583 repo = repo.filtered(currentfilter)
3599 repo = repo.filtered(currentfilter)
3584 timer, fm = gettimer(ui, opts)
3600 timer, fm = gettimer(ui, opts)
3585
3601
3586 def setup():
3602 def setup():
3587 if clearrevlogs:
3603 if clearrevlogs:
3588 clearchangelog(repo)
3604 clearchangelog(repo)
3589
3605
3590 def bench():
3606 def bench():
3591 fromfile(repo)
3607 fromfile(repo)
3592
3608
3593 timer(bench, setup=setup)
3609 timer(bench, setup=setup)
3594 fm.end()
3610 fm.end()
3595
3611
3596
3612
3597 @command(b'perfloadmarkers')
3613 @command(b'perfloadmarkers')
3598 def perfloadmarkers(ui, repo):
3614 def perfloadmarkers(ui, repo):
3599 """benchmark the time to parse the on-disk markers for a repo
3615 """benchmark the time to parse the on-disk markers for a repo
3600
3616
3601 Result is the number of markers in the repo."""
3617 Result is the number of markers in the repo."""
3602 timer, fm = gettimer(ui)
3618 timer, fm = gettimer(ui)
3603 svfs = getsvfs(repo)
3619 svfs = getsvfs(repo)
3604 timer(lambda: len(obsolete.obsstore(svfs)))
3620 timer(lambda: len(obsolete.obsstore(svfs)))
3605 fm.end()
3621 fm.end()
3606
3622
3607
3623
3608 @command(
3624 @command(
3609 b'perflrucachedict',
3625 b'perflrucachedict',
3610 formatteropts
3626 formatteropts
3611 + [
3627 + [
3612 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3628 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3613 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3629 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3614 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3630 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3615 (b'', b'size', 4, b'size of cache'),
3631 (b'', b'size', 4, b'size of cache'),
3616 (b'', b'gets', 10000, b'number of key lookups'),
3632 (b'', b'gets', 10000, b'number of key lookups'),
3617 (b'', b'sets', 10000, b'number of key sets'),
3633 (b'', b'sets', 10000, b'number of key sets'),
3618 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3634 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3619 (
3635 (
3620 b'',
3636 b'',
3621 b'mixedgetfreq',
3637 b'mixedgetfreq',
3622 50,
3638 50,
3623 b'frequency of get vs set ops in mixed mode',
3639 b'frequency of get vs set ops in mixed mode',
3624 ),
3640 ),
3625 ],
3641 ],
3626 norepo=True,
3642 norepo=True,
3627 )
3643 )
3628 def perflrucache(
3644 def perflrucache(
3629 ui,
3645 ui,
3630 mincost=0,
3646 mincost=0,
3631 maxcost=100,
3647 maxcost=100,
3632 costlimit=0,
3648 costlimit=0,
3633 size=4,
3649 size=4,
3634 gets=10000,
3650 gets=10000,
3635 sets=10000,
3651 sets=10000,
3636 mixed=10000,
3652 mixed=10000,
3637 mixedgetfreq=50,
3653 mixedgetfreq=50,
3638 **opts
3654 **opts
3639 ):
3655 ):
3640 opts = _byteskwargs(opts)
3656 opts = _byteskwargs(opts)
3641
3657
3642 def doinit():
3658 def doinit():
3643 for i in _xrange(10000):
3659 for i in _xrange(10000):
3644 util.lrucachedict(size)
3660 util.lrucachedict(size)
3645
3661
3646 costrange = list(range(mincost, maxcost + 1))
3662 costrange = list(range(mincost, maxcost + 1))
3647
3663
3648 values = []
3664 values = []
3649 for i in _xrange(size):
3665 for i in _xrange(size):
3650 values.append(random.randint(0, _maxint))
3666 values.append(random.randint(0, _maxint))
3651
3667
3652 # Get mode fills the cache and tests raw lookup performance with no
3668 # Get mode fills the cache and tests raw lookup performance with no
3653 # eviction.
3669 # eviction.
3654 getseq = []
3670 getseq = []
3655 for i in _xrange(gets):
3671 for i in _xrange(gets):
3656 getseq.append(random.choice(values))
3672 getseq.append(random.choice(values))
3657
3673
3658 def dogets():
3674 def dogets():
3659 d = util.lrucachedict(size)
3675 d = util.lrucachedict(size)
3660 for v in values:
3676 for v in values:
3661 d[v] = v
3677 d[v] = v
3662 for key in getseq:
3678 for key in getseq:
3663 value = d[key]
3679 value = d[key]
3664 value # silence pyflakes warning
3680 value # silence pyflakes warning
3665
3681
3666 def dogetscost():
3682 def dogetscost():
3667 d = util.lrucachedict(size, maxcost=costlimit)
3683 d = util.lrucachedict(size, maxcost=costlimit)
3668 for i, v in enumerate(values):
3684 for i, v in enumerate(values):
3669 d.insert(v, v, cost=costs[i])
3685 d.insert(v, v, cost=costs[i])
3670 for key in getseq:
3686 for key in getseq:
3671 try:
3687 try:
3672 value = d[key]
3688 value = d[key]
3673 value # silence pyflakes warning
3689 value # silence pyflakes warning
3674 except KeyError:
3690 except KeyError:
3675 pass
3691 pass
3676
3692
3677 # Set mode tests insertion speed with cache eviction.
3693 # Set mode tests insertion speed with cache eviction.
3678 setseq = []
3694 setseq = []
3679 costs = []
3695 costs = []
3680 for i in _xrange(sets):
3696 for i in _xrange(sets):
3681 setseq.append(random.randint(0, _maxint))
3697 setseq.append(random.randint(0, _maxint))
3682 costs.append(random.choice(costrange))
3698 costs.append(random.choice(costrange))
3683
3699
3684 def doinserts():
3700 def doinserts():
3685 d = util.lrucachedict(size)
3701 d = util.lrucachedict(size)
3686 for v in setseq:
3702 for v in setseq:
3687 d.insert(v, v)
3703 d.insert(v, v)
3688
3704
3689 def doinsertscost():
3705 def doinsertscost():
3690 d = util.lrucachedict(size, maxcost=costlimit)
3706 d = util.lrucachedict(size, maxcost=costlimit)
3691 for i, v in enumerate(setseq):
3707 for i, v in enumerate(setseq):
3692 d.insert(v, v, cost=costs[i])
3708 d.insert(v, v, cost=costs[i])
3693
3709
3694 def dosets():
3710 def dosets():
3695 d = util.lrucachedict(size)
3711 d = util.lrucachedict(size)
3696 for v in setseq:
3712 for v in setseq:
3697 d[v] = v
3713 d[v] = v
3698
3714
3699 # Mixed mode randomly performs gets and sets with eviction.
3715 # Mixed mode randomly performs gets and sets with eviction.
3700 mixedops = []
3716 mixedops = []
3701 for i in _xrange(mixed):
3717 for i in _xrange(mixed):
3702 r = random.randint(0, 100)
3718 r = random.randint(0, 100)
3703 if r < mixedgetfreq:
3719 if r < mixedgetfreq:
3704 op = 0
3720 op = 0
3705 else:
3721 else:
3706 op = 1
3722 op = 1
3707
3723
3708 mixedops.append(
3724 mixedops.append(
3709 (op, random.randint(0, size * 2), random.choice(costrange))
3725 (op, random.randint(0, size * 2), random.choice(costrange))
3710 )
3726 )
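# Keys span [0, size * 2], about twice the cache capacity, so mixed mode
# exercises both lookup misses and eviction.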
3711
3727
3712 def domixed():
3728 def domixed():
3713 d = util.lrucachedict(size)
3729 d = util.lrucachedict(size)
3714
3730
3715 for op, v, cost in mixedops:
3731 for op, v, cost in mixedops:
3716 if op == 0:
3732 if op == 0:
3717 try:
3733 try:
3718 d[v]
3734 d[v]
3719 except KeyError:
3735 except KeyError:
3720 pass
3736 pass
3721 else:
3737 else:
3722 d[v] = v
3738 d[v] = v
3723
3739
3724 def domixedcost():
3740 def domixedcost():
3725 d = util.lrucachedict(size, maxcost=costlimit)
3741 d = util.lrucachedict(size, maxcost=costlimit)
3726
3742
3727 for op, v, cost in mixedops:
3743 for op, v, cost in mixedops:
3728 if op == 0:
3744 if op == 0:
3729 try:
3745 try:
3730 d[v]
3746 d[v]
3731 except KeyError:
3747 except KeyError:
3732 pass
3748 pass
3733 else:
3749 else:
3734 d.insert(v, v, cost=cost)
3750 d.insert(v, v, cost=cost)
3735
3751
3736 benches = [
3752 benches = [
3737 (doinit, b'init'),
3753 (doinit, b'init'),
3738 ]
3754 ]
3739
3755
3740 if costlimit:
3756 if costlimit:
3741 benches.extend(
3757 benches.extend(
3742 [
3758 [
3743 (dogetscost, b'gets w/ cost limit'),
3759 (dogetscost, b'gets w/ cost limit'),
3744 (doinsertscost, b'inserts w/ cost limit'),
3760 (doinsertscost, b'inserts w/ cost limit'),
3745 (domixedcost, b'mixed w/ cost limit'),
3761 (domixedcost, b'mixed w/ cost limit'),
3746 ]
3762 ]
3747 )
3763 )
3748 else:
3764 else:
3749 benches.extend(
3765 benches.extend(
3750 [
3766 [
3751 (dogets, b'gets'),
3767 (dogets, b'gets'),
3752 (doinserts, b'inserts'),
3768 (doinserts, b'inserts'),
3753 (dosets, b'sets'),
3769 (dosets, b'sets'),
3754 (domixed, b'mixed'),
3770 (domixed, b'mixed'),
3755 ]
3771 ]
3756 )
3772 )
3757
3773
3758 for fn, title in benches:
3774 for fn, title in benches:
3759 timer, fm = gettimer(ui, opts)
3775 timer, fm = gettimer(ui, opts)
3760 timer(fn, title=title)
3776 timer(fn, title=title)
3761 fm.end()
3777 fm.end()
3762
3778
3763
3779
3764 @command(b'perfwrite', formatteropts)
3780 @command(b'perfwrite', formatteropts)
3765 def perfwrite(ui, repo, **opts):
3781 def perfwrite(ui, repo, **opts):
3766 """microbenchmark ui.write
3782 """microbenchmark ui.write
3767 """
3783 """
3768 opts = _byteskwargs(opts)
3784 opts = _byteskwargs(opts)
3769
3785
3770 timer, fm = gettimer(ui, opts)
3786 timer, fm = gettimer(ui, opts)
3771
3787
3772 def write():
3788 def write():
3773 for i in range(100000):
3789 for i in range(100000):
3774 ui.writenoi18n(b'Testing write performance\n')
3790 ui.writenoi18n(b'Testing write performance\n')
3775
3791
3776 timer(write)
3792 timer(write)
3777 fm.end()
3793 fm.end()
3778
3794
3779
3795
3780 def uisetup(ui):
3796 def uisetup(ui):
3781 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3797 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3782 commands, b'debugrevlogopts'
3798 commands, b'debugrevlogopts'
3783 ):
3799 ):
3784 # for "historical portability":
3800 # for "historical portability":
3785 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3801 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3786 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3802 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3787 # openrevlog() should cause failure, because it has been
3803 # openrevlog() should cause failure, because it has been
3788 # available since 3.5 (or 49c583ca48c4).
3804 # available since 3.5 (or 49c583ca48c4).
3789 def openrevlog(orig, repo, cmd, file_, opts):
3805 def openrevlog(orig, repo, cmd, file_, opts):
3790 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3806 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3791 raise error.Abort(
3807 raise error.Abort(
3792 b"This version doesn't support --dir option",
3808 b"This version doesn't support --dir option",
3793 hint=b"use 3.5 or later",
3809 hint=b"use 3.5 or later",
3794 )
3810 )
3795 return orig(repo, cmd, file_, opts)
3811 return orig(repo, cmd, file_, opts)
3796
3812
3797 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3813 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3798
3814
3799
3815
3800 @command(
3816 @command(
3801 b'perfprogress',
3817 b'perfprogress',
3802 formatteropts
3818 formatteropts
3803 + [
3819 + [
3804 (b'', b'topic', b'topic', b'topic for progress messages'),
3820 (b'', b'topic', b'topic', b'topic for progress messages'),
3805 (b'c', b'total', 1000000, b'total value we are progressing to'),
3821 (b'c', b'total', 1000000, b'total value we are progressing to'),
3806 ],
3822 ],
3807 norepo=True,
3823 norepo=True,
3808 )
3824 )
3809 def perfprogress(ui, topic=None, total=None, **opts):
3825 def perfprogress(ui, topic=None, total=None, **opts):
3810 """printing of progress bars"""
3826 """printing of progress bars"""
3811 opts = _byteskwargs(opts)
3827 opts = _byteskwargs(opts)
3812
3828
3813 timer, fm = gettimer(ui, opts)
3829 timer, fm = gettimer(ui, opts)
3814
3830
3815 def doprogress():
3831 def doprogress():
3816 with ui.makeprogress(topic, total=total) as progress:
3832 with ui.makeprogress(topic, total=total) as progress:
3817 for i in _xrange(total):
3833 for i in _xrange(total):
3818 progress.increment()
3834 progress.increment()
3819
3835
3820 timer(doprogress)
3836 timer(doprogress)
3821 fm.end()
3837 fm.end()
@@ -1,398 +1,399 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from <base> revs to <target>
96 benchmark branchmap update from <base> revs to <target>
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate benchmap the time of various distate operations
110 perfdirstate benchmap the time of various distate operations
111 perfdirstatedirs
111 perfdirstatedirs
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 benchmap a 'dirstate._map.filefoldmap.get()' request
114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 perfdirstatewrite
115 perfdirstatewrite
116 benchmap the time it take to write a dirstate on disk
116 benchmap the time it take to write a dirstate on disk
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
perfstatus benchmark the performance of a single status call
perftags (no help text available)
perftemplating
test the rendering time of a given template
perfunidiff benchmark a unified diff between revisions
perfvolatilesets
benchmark the computation of various volatile set
perfwalk (no help text available)
perfwrite microbenchmark ui.write

(use 'hg help -v perf' to show built-in aliases and global options)
$ hg perfaddremove
$ hg perfancestors
$ hg perfancestorset 2
$ hg perfannotate a
$ hg perfbdiff -c 1
$ hg perfbdiff --alldata 1
$ hg perfunidiff -c 1
$ hg perfunidiff --alldata 1
$ hg perfbookmarks
$ hg perfbranchmap
$ hg perfbranchmapload
$ hg perfbranchmapupdate --base "not tip" --target "tip"
benchmark of branchmap with 3 revisions with 1 new ones
$ hg perfcca
$ hg perfchangegroupchangelog
$ hg perfchangegroupchangelog --cgversion 01
$ hg perfchangeset 2
$ hg perfctxfiles 2
$ hg perfdiffwd
$ hg perfdirfoldmap
$ hg perfdirs
$ hg perfdirstate
$ hg perfdirstate --contains
$ hg perfdirstate --iteration
$ hg perfdirstatedirs
$ hg perfdirstatefoldmap
$ hg perfdirstatewrite
#if repofncache
$ hg perffncacheencode
$ hg perffncacheload
$ hg debugrebuildfncache
fncache already up to date
$ hg perffncachewrite
$ hg debugrebuildfncache
fncache already up to date
#endif
$ hg perfheads
$ hg perfignore
$ hg perfindex
$ hg perflinelogedits -n 1
$ hg perfloadmarkers
$ hg perflog
$ hg perflookup 2
$ hg perflrucache
$ hg perfmanifest 2
$ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
$ hg perfmanifest -m 44fe2c8352bb
abort: manifest revision must be integer or full node
[255]
$ hg perfmergecalculate -r 3
$ hg perfmoonwalk
$ hg perfnodelookup 2
$ hg perfpathcopies 1 2
$ hg perfprogress --total 1000
$ hg perfrawfiles 2
$ hg perfrevlogindex -c
#if reporevlogstore
$ hg perfrevlogrevisions .hg/store/data/a.i
#endif
$ hg perfrevlogrevision -m 0
$ hg perfrevlogchunks -c
$ hg perfrevrange
$ hg perfrevset 'all()'
$ hg perfstartup
$ hg perfstatus
$ hg perfstatus --dirstate
$ hg perftags
$ hg perftemplating
$ hg perfvolatilesets
$ hg perfwalk
$ hg perfparents
$ hg perfdiscovery -q .

Test run control
----------------

Simple single entry

$ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
! wall * comb * user * sys * (best of 15) (glob)

Multiple entries

$ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
! wall * comb * user * sys * (best of 5) (glob)

error cases are ignored (a sketch of the parsing involved follows the examples below)

$ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
malformatted run limit entry, missing "-": 500
! wall * comb * user * sys * (best of 5) (glob)
$ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
! wall * comb * user * sys * (best of 5) (glob)
$ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
! wall * comb * user * sys * (best of 5) (glob)

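The run limit entries exercised above follow the <time>-<numberofrun> format described in
the perf configuration: entries that cannot be split or converted are reported and
skipped, and the remaining entries still apply. As an illustration only, here is a
simplified Python sketch of that behaviour; parse_run_limits is a hypothetical helper,
not the actual contrib/perf.py parser:

  import sys

  def parse_run_limits(spec):
      """Parse a perf.run-limits style string into (seconds, runs) pairs.

      Hypothetical helper for illustration; malformed entries are reported
      on stderr and skipped, matching the test output above.
      """
      limits = []
      for entry in spec.split(','):
          entry = entry.strip()
          if '-' not in entry:
              sys.stderr.write(
                  'malformatted run limit entry, missing "-": %s\n' % entry)
              continue
          time_part, count_part = entry.rsplit('-', 1)
          try:
              seconds = float(time_part)
              runs = int(count_part)
          except ValueError as exc:
              sys.stderr.write(
                  'malformatted run limit entry, %s: %s\n' % (exc, entry))
              continue
          limits.append((seconds, runs))
      return limits

  # e.g. parse_run_limits('500, 0.000000001-5') keeps only (1e-09, 5)
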
test actual output
------------------

normal output:

$ hg perfheads --config perf.stub=no
! wall * comb * user * sys * (best of *) (glob)

detailed output:

$ hg perfheads --config perf.all-timing=yes --config perf.stub=no
! wall * comb * user * sys * (best of *) (glob)
! wall * comb * user * sys * (max of *) (glob)
! wall * comb * user * sys * (avg of *) (glob)
! wall * comb * user * sys * (median of *) (glob)

test json output
----------------

normal output:

$ hg perfheads --template json --config perf.stub=no
[
{
"comb": *, (glob)
"count": *, (glob)
"sys": *, (glob)
"user": *, (glob)
"wall": * (glob)
}
]

detailed output:

$ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
[
{
"avg.comb": *, (glob)
"avg.count": *, (glob)
"avg.sys": *, (glob)
"avg.user": *, (glob)
"avg.wall": *, (glob)
"comb": *, (glob)
"count": *, (glob)
"max.comb": *, (glob)
"max.count": *, (glob)
"max.sys": *, (glob)
"max.user": *, (glob)
"max.wall": *, (glob)
"median.comb": *, (glob)
"median.count": *, (glob)
"median.sys": *, (glob)
"median.user": *, (glob)
"median.wall": *, (glob)
"sys": *, (glob)
"user": *, (glob)
"wall": * (glob)
}
]

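Because the --template json output above is plain JSON, it can be post-processed by any
script. A minimal sketch, assuming hg is on PATH and the current directory is a
repository (the "max.*", "avg.*" and "median.*" keys only appear when perf.all-timing is
enabled):

  import json
  import subprocess

  # Run a perf command with JSON output and load the resulting list of
  # measurement dictionaries (one entry per timed benchmark).
  out = subprocess.run(
      ['hg', 'perfheads', '--template', 'json', '--config', 'perf.stub=no'],
      check=True, capture_output=True, text=True,
  ).stdout
  results = json.loads(out)

  for result in results:
      # "wall" and "count" are always present in the JSON output shown above.
      print('wall time: %f over %d runs' % (result['wall'], result['count']))
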
Test pre-run feature
--------------------

(perf discovery has some spurious output)

$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
! wall * comb * user * sys * (best of 1) (glob)
searching for changes
$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
! wall * comb * user * sys * (best of 1) (glob)
searching for changes
searching for changes
$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
! wall * comb * user * sys * (best of 1) (glob)
searching for changes
searching for changes
searching for changes
searching for changes

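The pre-run iterations are executed and discarded before timing starts, which is why each
extra perf.pre-run unit adds one more "searching for changes" line above while the
reported result stays "best of 1". A rough Python sketch of the idea, using a
hypothetical benchmark() helper rather than the actual perf.py machinery:

  import time

  def benchmark(func, pre_run=0, runs=1):
      """Call func() pre_run times as warm-up, then time `runs` calls."""
      for _ in range(pre_run):
          func()            # warm-up runs; their timings are thrown away
      timings = []
      for _ in range(runs):
          start = time.perf_counter()
          func()
          timings.append(time.perf_counter() - start)
      return min(timings)   # report the best wall time, as the "best of" line does

  # e.g. benchmark(lambda: sum(range(10000)), pre_run=3, runs=5)
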
test profile-benchmark option
------------------------------

Function to check that statprof ran
$ statprofran () {
> egrep 'Sample count:|No samples recorded' > /dev/null
> }
$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran

Check perf.py for historical portability
----------------------------------------

$ cd "$TESTDIR/.."

$ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
> testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
> "$TESTDIR"/check-perf-code.py contrib/perf.py
contrib/perf.py:\d+: (re)
> from mercurial import (
import newer module separately in try clause for early Mercurial
contrib/perf.py:\d+: (re)
> from mercurial import (
import newer module separately in try clause for early Mercurial
contrib/perf.py:\d+: (re)
> origindexpath = orig.opener.join(orig.indexfile)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
> origdatapath = orig.opener.join(orig.datafile)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
> vfs = vfsmod.vfs(tmpdir)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
> vfs.options = getattr(orig.opener, 'options', None)
use getvfs()/getsvfs() for early Mercurial
[1]