##// END OF EJS Templates
perf: introduce a `--iteration` to `perfdirstate`...
marmoute -
r43466:5f9b1250 default
parent child Browse files
Show More
@@ -1,3778 +1,3789 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged.

    Used as a no-op stand-in for pycompat conversion helpers on
    Mercurial versions that lack them.
    """
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
# unique sentinel so that getattr() can distinguish "attribute absent"
# from any real attribute value (including None)
_undefined = object()


def safehasattr(thing, attr):
    """Return True when *thing* has attribute *attr* (given as bytes)."""
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing
179
179
180
180
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# util.timer has only existed since ae5d60bb70c9; recreate its clock
# selection locally so older Mercurials get a usable timer too
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on py3 os.name is str, so this bytes comparison never
    # matches; harmless since py3 always takes the perf_counter branch
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# commands.formatteropts has existed since 3.2 (or 7a7eed5176a4), even
# though formatting itself dates back to 2.2 (or ae5f92e154d3); fall
# back to an empty option list when it is missing
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# commands.debugrevlogopts has existed since 3.7 (or 5606f7d0d063), even
# though cmdutil.openrevlog() dates back to 1.9 (or a79fea6b3e77); fall
# back to a locally defined option list
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table populated by the @command decorator below
cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its name list.

    Defined locally because cmdutil.parsealiases has only been
    available since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
228
228
229
229
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # the "norepo" option only exists since 3.1 (or 75a96326cecb);
        # emulate it by wrapping the original cmdutil.command
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # cmdutil.command only exists since 1.9 (or 2daa5179e73f); provide a
    # local "@command" decorator with the same shape
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault, experimental=True,
    )
    configitem(
        b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault, experimental=True,
    )
    configitem(
        b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault, experimental=True,
    )
    configitem(
        b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault, experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits',
        default=mercurial.configitems.dynamicdefault, experimental=True,
    )
except (ImportError, AttributeError):
    # registrar/configitems missing entirely on old Mercurials; the
    # extension still works, the options just are not declared
    pass
except TypeError:
    # compatibility fix for a11fd395e83f (hg 5.2): configitem() there
    # rejects the experimental keyword, so register everything without it
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return a length function for benchmark loops.

    In stub mode (perf.stub set) a constant-1 callable is returned so
    benchmarks do as little work as possible; otherwise the builtin
    len() is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stubbed else len
341
341
342
342
class noop(object):
    """Context manager that does nothing.

    Used in place of a real profiler context when profiling is
    disabled (see _timer).
    """

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None


# shared do-nothing context instance
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """Return a (timer, formatter) pair for benchmark commands.

    Centralizes formatter creation so individual perf commands do not
    have to duplicate it.  The returned timer is either the real
    _timer (pre-configured from the perf.* config section) or, in stub
    mode, a run-once stand-in.
    """

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}

    # redirect all output to stderr unless the buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter, preferring the modern ui API
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # ui.formatter has only existed since 2.2 (or ae5f92e154d3);
        # fall back to a minimal local implementation
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                self.hexfunc = node.hex if ui.debugflag else node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<mincount>"; malformed entries are
    # warned about and skipped
    limits = []
    for item in ui.configlist(b"perf", b"run-limits", []):
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """One-shot replacement for _timer used when perf.stub is set.

    Runs *setup* (when provided) and then *func* exactly once; nothing
    is measured and nothing is written to *fm*.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time the body of the with-statement.

    Yields a list; on exit a single (wall, user, sys) tuple of elapsed
    seconds is appended to it.
    """
    sample = []
    os_before = os.times()
    clock_before = util.timer()
    yield sample
    clock_after = util.timer()
    os_after = os.times()
    sample.append(
        (
            clock_after - clock_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )


# list of stop conditions: (elapsed seconds, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* and report the timings through formatter *fm*.

    *setup*, when given, runs before every invocation (warm-up runs
    included).  Only the first measured iteration executes under
    *profiler*; iteration stops once any entry of *limits* has both
    its elapsed-time and minimum-count condition satisfied.
    """
    gc.collect()
    samples = []
    start = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs, never measured
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    running = True
    while running:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled
        profiler = NOOPCTX
        count += 1
        samples.append(item[0])
        # look for a stop condition
        elapsed = util.timer() - start
        running = not any(
            elapsed >= t and count >= mincount for t, mincount in limits
        )

    formatone(fm, samples, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing statistics through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples and is sorted in
    place.  The best run is always shown; with *displayall* the max,
    average and median are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-best entries carry a "<role>." prefix on every field name
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read an integer config value, falling back to *default*.

    Uses ui.config rather than ui.configint for "historical
    portability": ui.configint has only existed since 1.9 (or
    fa2b596db182).  Raises error.ConfigError for non-integer values.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
599 def safeattrsetter(obj, name, ignoremissing=False):
599 def safeattrsetter(obj, name, ignoremissing=False):
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
601
601
602 This function is aborted, if 'obj' doesn't have 'name' attribute
602 This function is aborted, if 'obj' doesn't have 'name' attribute
603 at runtime. This avoids overlooking removal of an attribute, which
603 at runtime. This avoids overlooking removal of an attribute, which
604 breaks assumption of performance measurement, in the future.
604 breaks assumption of performance measurement, in the future.
605
605
606 This function returns the object to (1) assign a new value, and
606 This function returns the object to (1) assign a new value, and
607 (2) restore an original value to the attribute.
607 (2) restore an original value to the attribute.
608
608
609 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
609 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
610 abortion, and this function returns None. This is useful to
610 abortion, and this function returns None. This is useful to
611 examine an attribute, which isn't ensured in all Mercurial
611 examine an attribute, which isn't ensured in all Mercurial
612 versions.
612 versions.
613 """
613 """
614 if not util.safehasattr(obj, name):
614 if not util.safehasattr(obj, name):
615 if ignoremissing:
615 if ignoremissing:
616 return None
616 return None
617 raise error.Abort(
617 raise error.Abort(
618 (
618 (
619 b"missing attribute %s of %s might break assumption"
619 b"missing attribute %s of %s might break assumption"
620 b" of performance measurement"
620 b" of performance measurement"
621 )
621 )
622 % (name, obj)
622 % (name, obj)
623 )
623 )
624
624
625 origvalue = getattr(obj, _sysstr(name))
625 origvalue = getattr(obj, _sysstr(name))
626
626
627 class attrutil(object):
627 class attrutil(object):
628 def set(self, newvalue):
628 def set(self, newvalue):
629 setattr(obj, _sysstr(name), newvalue)
629 setattr(obj, _sysstr(name), newvalue)
630
630
631 def restore(self):
631 def restore(self):
632 setattr(obj, _sysstr(name), origvalue)
632 setattr(obj, _sysstr(name), origvalue)
633
633
634 return attrutil()
634 return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
640 def getbranchmapsubsettable():
640 def getbranchmapsubsettable():
641 # for "historical portability":
641 # for "historical portability":
642 # subsettable is defined in:
642 # subsettable is defined in:
643 # - branchmap since 2.9 (or 175c6fd8cacc)
643 # - branchmap since 2.9 (or 175c6fd8cacc)
644 # - repoview since 2.5 (or 59a9f18d4587)
644 # - repoview since 2.5 (or 59a9f18d4587)
645 # - repoviewutil since 5.0
645 # - repoviewutil since 5.0
646 for mod in (branchmap, repoview, repoviewutil):
646 for mod in (branchmap, repoview, repoviewutil):
647 subsettable = getattr(mod, 'subsettable', None)
647 subsettable = getattr(mod, 'subsettable', None)
648 if subsettable:
648 if subsettable:
649 return subsettable
649 return subsettable
650
650
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
652 # branchmap and repoview modules exist, but subsettable attribute
652 # branchmap and repoview modules exist, but subsettable attribute
653 # doesn't)
653 # doesn't)
654 raise error.Abort(
654 raise error.Abort(
655 b"perfbranchmap not available with this Mercurial",
655 b"perfbranchmap not available with this Mercurial",
656 hint=b"use 2.5 or later",
656 hint=b"use 2.5 or later",
657 )
657 )
658
658
659
659
660 def getsvfs(repo):
660 def getsvfs(repo):
661 """Return appropriate object to access files under .hg/store
661 """Return appropriate object to access files under .hg/store
662 """
662 """
663 # for "historical portability":
663 # for "historical portability":
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
665 svfs = getattr(repo, 'svfs', None)
665 svfs = getattr(repo, 'svfs', None)
666 if svfs:
666 if svfs:
667 return svfs
667 return svfs
668 else:
668 else:
669 return getattr(repo, 'sopener')
669 return getattr(repo, 'sopener')
670
670
671
671
672 def getvfs(repo):
672 def getvfs(repo):
673 """Return appropriate object to access files under .hg
673 """Return appropriate object to access files under .hg
674 """
674 """
675 # for "historical portability":
675 # for "historical portability":
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
677 vfs = getattr(repo, 'vfs', None)
677 vfs = getattr(repo, 'vfs', None)
678 if vfs:
678 if vfs:
679 return vfs
679 return vfs
680 else:
680 else:
681 return getattr(repo, 'opener')
681 return getattr(repo, 'opener')
682
682
683
683
684 def repocleartagscachefunc(repo):
684 def repocleartagscachefunc(repo):
685 """Return the function to clear tags cache according to repo internal API
685 """Return the function to clear tags cache according to repo internal API
686 """
686 """
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
689 # correct way to clear tags cache, because existing code paths
689 # correct way to clear tags cache, because existing code paths
690 # expect _tagscache to be a structured object.
690 # expect _tagscache to be a structured object.
691 def clearcache():
691 def clearcache():
692 # _tagscache has been filteredpropertycache since 2.5 (or
692 # _tagscache has been filteredpropertycache since 2.5 (or
693 # 98c867ac1330), and delattr() can't work in such case
693 # 98c867ac1330), and delattr() can't work in such case
694 if b'_tagscache' in vars(repo):
694 if b'_tagscache' in vars(repo):
695 del repo.__dict__[b'_tagscache']
695 del repo.__dict__[b'_tagscache']
696
696
697 return clearcache
697 return clearcache
698
698
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
700 if repotags: # since 1.4 (or 5614a628d173)
700 if repotags: # since 1.4 (or 5614a628d173)
701 return lambda: repotags.set(None)
701 return lambda: repotags.set(None)
702
702
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
705 return lambda: repotagscache.set(None)
705 return lambda: repotagscache.set(None)
706
706
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
708 # this point, but it isn't so problematic, because:
708 # this point, but it isn't so problematic, because:
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
710 # in perftags() causes failure soon
710 # in perftags() causes failure soon
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
712 raise error.Abort(b"tags API of this hg command is unknown")
712 raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
718 def clearfilecache(obj, attrname):
718 def clearfilecache(obj, attrname):
719 unfiltered = getattr(obj, 'unfiltered', None)
719 unfiltered = getattr(obj, 'unfiltered', None)
720 if unfiltered is not None:
720 if unfiltered is not None:
721 obj = obj.unfiltered()
721 obj = obj.unfiltered()
722 if attrname in vars(obj):
722 if attrname in vars(obj):
723 delattr(obj, attrname)
723 delattr(obj, attrname)
724 obj._filecache.pop(attrname, None)
724 obj._filecache.pop(attrname, None)
725
725
726
726
727 def clearchangelog(repo):
727 def clearchangelog(repo):
728 if repo is not repo.unfiltered():
728 if repo is not repo.unfiltered():
729 object.__setattr__(repo, r'_clcachekey', None)
729 object.__setattr__(repo, r'_clcachekey', None)
730 object.__setattr__(repo, r'_clcache', None)
730 object.__setattr__(repo, r'_clcache', None)
731 clearfilecache(repo.unfiltered(), 'changelog')
731 clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
737 @command(b'perfwalk', formatteropts)
737 @command(b'perfwalk', formatteropts)
738 def perfwalk(ui, repo, *pats, **opts):
738 def perfwalk(ui, repo, *pats, **opts):
739 opts = _byteskwargs(opts)
739 opts = _byteskwargs(opts)
740 timer, fm = gettimer(ui, opts)
740 timer, fm = gettimer(ui, opts)
741 m = scmutil.match(repo[None], pats, {})
741 m = scmutil.match(repo[None], pats, {})
742 timer(
742 timer(
743 lambda: len(
743 lambda: len(
744 list(
744 list(
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
746 )
746 )
747 )
747 )
748 )
748 )
749 fm.end()
749 fm.end()
750
750
751
751
752 @command(b'perfannotate', formatteropts)
752 @command(b'perfannotate', formatteropts)
753 def perfannotate(ui, repo, f, **opts):
753 def perfannotate(ui, repo, f, **opts):
754 opts = _byteskwargs(opts)
754 opts = _byteskwargs(opts)
755 timer, fm = gettimer(ui, opts)
755 timer, fm = gettimer(ui, opts)
756 fc = repo[b'.'][f]
756 fc = repo[b'.'][f]
757 timer(lambda: len(fc.annotate(True)))
757 timer(lambda: len(fc.annotate(True)))
758 fm.end()
758 fm.end()
759
759
760
760
761 @command(
761 @command(
762 b'perfstatus',
762 b'perfstatus',
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
764 + formatteropts,
764 + formatteropts,
765 )
765 )
766 def perfstatus(ui, repo, **opts):
766 def perfstatus(ui, repo, **opts):
767 """benchmark the performance of a single status call
767 """benchmark the performance of a single status call
768
768
769 The repository data are preserved between each call.
769 The repository data are preserved between each call.
770
770
771 By default, only the status of the tracked file are requested. If
771 By default, only the status of the tracked file are requested. If
772 `--unknown` is passed, the "unknown" files are also tracked.
772 `--unknown` is passed, the "unknown" files are also tracked.
773 """
773 """
774 opts = _byteskwargs(opts)
774 opts = _byteskwargs(opts)
775 # m = match.always(repo.root, repo.getcwd())
775 # m = match.always(repo.root, repo.getcwd())
776 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
776 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
777 # False))))
777 # False))))
778 timer, fm = gettimer(ui, opts)
778 timer, fm = gettimer(ui, opts)
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
780 fm.end()
780 fm.end()
781
781
782
782
783 @command(b'perfaddremove', formatteropts)
783 @command(b'perfaddremove', formatteropts)
784 def perfaddremove(ui, repo, **opts):
784 def perfaddremove(ui, repo, **opts):
785 opts = _byteskwargs(opts)
785 opts = _byteskwargs(opts)
786 timer, fm = gettimer(ui, opts)
786 timer, fm = gettimer(ui, opts)
787 try:
787 try:
788 oldquiet = repo.ui.quiet
788 oldquiet = repo.ui.quiet
789 repo.ui.quiet = True
789 repo.ui.quiet = True
790 matcher = scmutil.match(repo[None])
790 matcher = scmutil.match(repo[None])
791 opts[b'dry_run'] = True
791 opts[b'dry_run'] = True
792 if b'uipathfn' in getargspec(scmutil.addremove).args:
792 if b'uipathfn' in getargspec(scmutil.addremove).args:
793 uipathfn = scmutil.getuipathfn(repo)
793 uipathfn = scmutil.getuipathfn(repo)
794 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
794 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
795 else:
795 else:
796 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
796 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
797 finally:
797 finally:
798 repo.ui.quiet = oldquiet
798 repo.ui.quiet = oldquiet
799 fm.end()
799 fm.end()
800
800
801
801
802 def clearcaches(cl):
802 def clearcaches(cl):
803 # behave somewhat consistently across internal API changes
803 # behave somewhat consistently across internal API changes
804 if util.safehasattr(cl, b'clearcaches'):
804 if util.safehasattr(cl, b'clearcaches'):
805 cl.clearcaches()
805 cl.clearcaches()
806 elif util.safehasattr(cl, b'_nodecache'):
806 elif util.safehasattr(cl, b'_nodecache'):
807 from mercurial.node import nullid, nullrev
807 from mercurial.node import nullid, nullrev
808
808
809 cl._nodecache = {nullid: nullrev}
809 cl._nodecache = {nullid: nullrev}
810 cl._nodepos = None
810 cl._nodepos = None
811
811
812
812
813 @command(b'perfheads', formatteropts)
813 @command(b'perfheads', formatteropts)
814 def perfheads(ui, repo, **opts):
814 def perfheads(ui, repo, **opts):
815 """benchmark the computation of a changelog heads"""
815 """benchmark the computation of a changelog heads"""
816 opts = _byteskwargs(opts)
816 opts = _byteskwargs(opts)
817 timer, fm = gettimer(ui, opts)
817 timer, fm = gettimer(ui, opts)
818 cl = repo.changelog
818 cl = repo.changelog
819
819
820 def s():
820 def s():
821 clearcaches(cl)
821 clearcaches(cl)
822
822
823 def d():
823 def d():
824 len(cl.headrevs())
824 len(cl.headrevs())
825
825
826 timer(d, setup=s)
826 timer(d, setup=s)
827 fm.end()
827 fm.end()
828
828
829
829
830 @command(
830 @command(
831 b'perftags',
831 b'perftags',
832 formatteropts
832 formatteropts
833 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
833 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
834 )
834 )
835 def perftags(ui, repo, **opts):
835 def perftags(ui, repo, **opts):
836 opts = _byteskwargs(opts)
836 opts = _byteskwargs(opts)
837 timer, fm = gettimer(ui, opts)
837 timer, fm = gettimer(ui, opts)
838 repocleartagscache = repocleartagscachefunc(repo)
838 repocleartagscache = repocleartagscachefunc(repo)
839 clearrevlogs = opts[b'clear_revlogs']
839 clearrevlogs = opts[b'clear_revlogs']
840
840
841 def s():
841 def s():
842 if clearrevlogs:
842 if clearrevlogs:
843 clearchangelog(repo)
843 clearchangelog(repo)
844 clearfilecache(repo.unfiltered(), 'manifest')
844 clearfilecache(repo.unfiltered(), 'manifest')
845 repocleartagscache()
845 repocleartagscache()
846
846
847 def t():
847 def t():
848 return len(repo.tags())
848 return len(repo.tags())
849
849
850 timer(t, setup=s)
850 timer(t, setup=s)
851 fm.end()
851 fm.end()
852
852
853
853
854 @command(b'perfancestors', formatteropts)
854 @command(b'perfancestors', formatteropts)
855 def perfancestors(ui, repo, **opts):
855 def perfancestors(ui, repo, **opts):
856 opts = _byteskwargs(opts)
856 opts = _byteskwargs(opts)
857 timer, fm = gettimer(ui, opts)
857 timer, fm = gettimer(ui, opts)
858 heads = repo.changelog.headrevs()
858 heads = repo.changelog.headrevs()
859
859
860 def d():
860 def d():
861 for a in repo.changelog.ancestors(heads):
861 for a in repo.changelog.ancestors(heads):
862 pass
862 pass
863
863
864 timer(d)
864 timer(d)
865 fm.end()
865 fm.end()
866
866
867
867
868 @command(b'perfancestorset', formatteropts)
868 @command(b'perfancestorset', formatteropts)
869 def perfancestorset(ui, repo, revset, **opts):
869 def perfancestorset(ui, repo, revset, **opts):
870 opts = _byteskwargs(opts)
870 opts = _byteskwargs(opts)
871 timer, fm = gettimer(ui, opts)
871 timer, fm = gettimer(ui, opts)
872 revs = repo.revs(revset)
872 revs = repo.revs(revset)
873 heads = repo.changelog.headrevs()
873 heads = repo.changelog.headrevs()
874
874
875 def d():
875 def d():
876 s = repo.changelog.ancestors(heads)
876 s = repo.changelog.ancestors(heads)
877 for rev in revs:
877 for rev in revs:
878 rev in s
878 rev in s
879
879
880 timer(d)
880 timer(d)
881 fm.end()
881 fm.end()
882
882
883
883
884 @command(b'perfdiscovery', formatteropts, b'PATH')
884 @command(b'perfdiscovery', formatteropts, b'PATH')
885 def perfdiscovery(ui, repo, path, **opts):
885 def perfdiscovery(ui, repo, path, **opts):
886 """benchmark discovery between local repo and the peer at given path
886 """benchmark discovery between local repo and the peer at given path
887 """
887 """
888 repos = [repo, None]
888 repos = [repo, None]
889 timer, fm = gettimer(ui, opts)
889 timer, fm = gettimer(ui, opts)
890 path = ui.expandpath(path)
890 path = ui.expandpath(path)
891
891
892 def s():
892 def s():
893 repos[1] = hg.peer(ui, opts, path)
893 repos[1] = hg.peer(ui, opts, path)
894
894
895 def d():
895 def d():
896 setdiscovery.findcommonheads(ui, *repos)
896 setdiscovery.findcommonheads(ui, *repos)
897
897
898 timer(d, setup=s)
898 timer(d, setup=s)
899 fm.end()
899 fm.end()
900
900
901
901
902 @command(
902 @command(
903 b'perfbookmarks',
903 b'perfbookmarks',
904 formatteropts
904 formatteropts
905 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
905 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
906 )
906 )
907 def perfbookmarks(ui, repo, **opts):
907 def perfbookmarks(ui, repo, **opts):
908 """benchmark parsing bookmarks from disk to memory"""
908 """benchmark parsing bookmarks from disk to memory"""
909 opts = _byteskwargs(opts)
909 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
910 timer, fm = gettimer(ui, opts)
911
911
912 clearrevlogs = opts[b'clear_revlogs']
912 clearrevlogs = opts[b'clear_revlogs']
913
913
914 def s():
914 def s():
915 if clearrevlogs:
915 if clearrevlogs:
916 clearchangelog(repo)
916 clearchangelog(repo)
917 clearfilecache(repo, b'_bookmarks')
917 clearfilecache(repo, b'_bookmarks')
918
918
919 def d():
919 def d():
920 repo._bookmarks
920 repo._bookmarks
921
921
922 timer(d, setup=s)
922 timer(d, setup=s)
923 fm.end()
923 fm.end()
924
924
925
925
926 @command(b'perfbundleread', formatteropts, b'BUNDLE')
926 @command(b'perfbundleread', formatteropts, b'BUNDLE')
927 def perfbundleread(ui, repo, bundlepath, **opts):
927 def perfbundleread(ui, repo, bundlepath, **opts):
928 """Benchmark reading of bundle files.
928 """Benchmark reading of bundle files.
929
929
930 This command is meant to isolate the I/O part of bundle reading as
930 This command is meant to isolate the I/O part of bundle reading as
931 much as possible.
931 much as possible.
932 """
932 """
933 from mercurial import (
933 from mercurial import (
934 bundle2,
934 bundle2,
935 exchange,
935 exchange,
936 streamclone,
936 streamclone,
937 )
937 )
938
938
939 opts = _byteskwargs(opts)
939 opts = _byteskwargs(opts)
940
940
941 def makebench(fn):
941 def makebench(fn):
942 def run():
942 def run():
943 with open(bundlepath, b'rb') as fh:
943 with open(bundlepath, b'rb') as fh:
944 bundle = exchange.readbundle(ui, fh, bundlepath)
944 bundle = exchange.readbundle(ui, fh, bundlepath)
945 fn(bundle)
945 fn(bundle)
946
946
947 return run
947 return run
948
948
949 def makereadnbytes(size):
949 def makereadnbytes(size):
950 def run():
950 def run():
951 with open(bundlepath, b'rb') as fh:
951 with open(bundlepath, b'rb') as fh:
952 bundle = exchange.readbundle(ui, fh, bundlepath)
952 bundle = exchange.readbundle(ui, fh, bundlepath)
953 while bundle.read(size):
953 while bundle.read(size):
954 pass
954 pass
955
955
956 return run
956 return run
957
957
958 def makestdioread(size):
958 def makestdioread(size):
959 def run():
959 def run():
960 with open(bundlepath, b'rb') as fh:
960 with open(bundlepath, b'rb') as fh:
961 while fh.read(size):
961 while fh.read(size):
962 pass
962 pass
963
963
964 return run
964 return run
965
965
966 # bundle1
966 # bundle1
967
967
968 def deltaiter(bundle):
968 def deltaiter(bundle):
969 for delta in bundle.deltaiter():
969 for delta in bundle.deltaiter():
970 pass
970 pass
971
971
972 def iterchunks(bundle):
972 def iterchunks(bundle):
973 for chunk in bundle.getchunks():
973 for chunk in bundle.getchunks():
974 pass
974 pass
975
975
976 # bundle2
976 # bundle2
977
977
978 def forwardchunks(bundle):
978 def forwardchunks(bundle):
979 for chunk in bundle._forwardchunks():
979 for chunk in bundle._forwardchunks():
980 pass
980 pass
981
981
982 def iterparts(bundle):
982 def iterparts(bundle):
983 for part in bundle.iterparts():
983 for part in bundle.iterparts():
984 pass
984 pass
985
985
986 def iterpartsseekable(bundle):
986 def iterpartsseekable(bundle):
987 for part in bundle.iterparts(seekable=True):
987 for part in bundle.iterparts(seekable=True):
988 pass
988 pass
989
989
990 def seek(bundle):
990 def seek(bundle):
991 for part in bundle.iterparts(seekable=True):
991 for part in bundle.iterparts(seekable=True):
992 part.seek(0, os.SEEK_END)
992 part.seek(0, os.SEEK_END)
993
993
994 def makepartreadnbytes(size):
994 def makepartreadnbytes(size):
995 def run():
995 def run():
996 with open(bundlepath, b'rb') as fh:
996 with open(bundlepath, b'rb') as fh:
997 bundle = exchange.readbundle(ui, fh, bundlepath)
997 bundle = exchange.readbundle(ui, fh, bundlepath)
998 for part in bundle.iterparts():
998 for part in bundle.iterparts():
999 while part.read(size):
999 while part.read(size):
1000 pass
1000 pass
1001
1001
1002 return run
1002 return run
1003
1003
1004 benches = [
1004 benches = [
1005 (makestdioread(8192), b'read(8k)'),
1005 (makestdioread(8192), b'read(8k)'),
1006 (makestdioread(16384), b'read(16k)'),
1006 (makestdioread(16384), b'read(16k)'),
1007 (makestdioread(32768), b'read(32k)'),
1007 (makestdioread(32768), b'read(32k)'),
1008 (makestdioread(131072), b'read(128k)'),
1008 (makestdioread(131072), b'read(128k)'),
1009 ]
1009 ]
1010
1010
1011 with open(bundlepath, b'rb') as fh:
1011 with open(bundlepath, b'rb') as fh:
1012 bundle = exchange.readbundle(ui, fh, bundlepath)
1012 bundle = exchange.readbundle(ui, fh, bundlepath)
1013
1013
1014 if isinstance(bundle, changegroup.cg1unpacker):
1014 if isinstance(bundle, changegroup.cg1unpacker):
1015 benches.extend(
1015 benches.extend(
1016 [
1016 [
1017 (makebench(deltaiter), b'cg1 deltaiter()'),
1017 (makebench(deltaiter), b'cg1 deltaiter()'),
1018 (makebench(iterchunks), b'cg1 getchunks()'),
1018 (makebench(iterchunks), b'cg1 getchunks()'),
1019 (makereadnbytes(8192), b'cg1 read(8k)'),
1019 (makereadnbytes(8192), b'cg1 read(8k)'),
1020 (makereadnbytes(16384), b'cg1 read(16k)'),
1020 (makereadnbytes(16384), b'cg1 read(16k)'),
1021 (makereadnbytes(32768), b'cg1 read(32k)'),
1021 (makereadnbytes(32768), b'cg1 read(32k)'),
1022 (makereadnbytes(131072), b'cg1 read(128k)'),
1022 (makereadnbytes(131072), b'cg1 read(128k)'),
1023 ]
1023 ]
1024 )
1024 )
1025 elif isinstance(bundle, bundle2.unbundle20):
1025 elif isinstance(bundle, bundle2.unbundle20):
1026 benches.extend(
1026 benches.extend(
1027 [
1027 [
1028 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1028 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1029 (makebench(iterparts), b'bundle2 iterparts()'),
1029 (makebench(iterparts), b'bundle2 iterparts()'),
1030 (
1030 (
1031 makebench(iterpartsseekable),
1031 makebench(iterpartsseekable),
1032 b'bundle2 iterparts() seekable',
1032 b'bundle2 iterparts() seekable',
1033 ),
1033 ),
1034 (makebench(seek), b'bundle2 part seek()'),
1034 (makebench(seek), b'bundle2 part seek()'),
1035 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1035 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1036 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1036 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1037 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1037 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1038 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1038 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1039 ]
1039 ]
1040 )
1040 )
1041 elif isinstance(bundle, streamclone.streamcloneapplier):
1041 elif isinstance(bundle, streamclone.streamcloneapplier):
1042 raise error.Abort(b'stream clone bundles not supported')
1042 raise error.Abort(b'stream clone bundles not supported')
1043 else:
1043 else:
1044 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1044 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1045
1045
1046 for fn, title in benches:
1046 for fn, title in benches:
1047 timer, fm = gettimer(ui, opts)
1047 timer, fm = gettimer(ui, opts)
1048 timer(fn, title=title)
1048 timer(fn, title=title)
1049 fm.end()
1049 fm.end()
1050
1050
1051
1051
1052 @command(
1052 @command(
1053 b'perfchangegroupchangelog',
1053 b'perfchangegroupchangelog',
1054 formatteropts
1054 formatteropts
1055 + [
1055 + [
1056 (b'', b'cgversion', b'02', b'changegroup version'),
1056 (b'', b'cgversion', b'02', b'changegroup version'),
1057 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1057 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1058 ],
1058 ],
1059 )
1059 )
1060 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1060 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1061 """Benchmark producing a changelog group for a changegroup.
1061 """Benchmark producing a changelog group for a changegroup.
1062
1062
1063 This measures the time spent processing the changelog during a
1063 This measures the time spent processing the changelog during a
1064 bundle operation. This occurs during `hg bundle` and on a server
1064 bundle operation. This occurs during `hg bundle` and on a server
1065 processing a `getbundle` wire protocol request (handles clones
1065 processing a `getbundle` wire protocol request (handles clones
1066 and pull requests).
1066 and pull requests).
1067
1067
1068 By default, all revisions are added to the changegroup.
1068 By default, all revisions are added to the changegroup.
1069 """
1069 """
1070 opts = _byteskwargs(opts)
1070 opts = _byteskwargs(opts)
1071 cl = repo.changelog
1071 cl = repo.changelog
1072 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1072 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1073 bundler = changegroup.getbundler(cgversion, repo)
1073 bundler = changegroup.getbundler(cgversion, repo)
1074
1074
1075 def d():
1075 def d():
1076 state, chunks = bundler._generatechangelog(cl, nodes)
1076 state, chunks = bundler._generatechangelog(cl, nodes)
1077 for chunk in chunks:
1077 for chunk in chunks:
1078 pass
1078 pass
1079
1079
1080 timer, fm = gettimer(ui, opts)
1080 timer, fm = gettimer(ui, opts)
1081
1081
1082 # Terminal printing can interfere with timing. So disable it.
1082 # Terminal printing can interfere with timing. So disable it.
1083 with ui.configoverride({(b'progress', b'disable'): True}):
1083 with ui.configoverride({(b'progress', b'disable'): True}):
1084 timer(d)
1084 timer(d)
1085
1085
1086 fm.end()
1086 fm.end()
1087
1087
1088
1088
1089 @command(b'perfdirs', formatteropts)
1089 @command(b'perfdirs', formatteropts)
1090 def perfdirs(ui, repo, **opts):
1090 def perfdirs(ui, repo, **opts):
1091 opts = _byteskwargs(opts)
1091 opts = _byteskwargs(opts)
1092 timer, fm = gettimer(ui, opts)
1092 timer, fm = gettimer(ui, opts)
1093 dirstate = repo.dirstate
1093 dirstate = repo.dirstate
1094 b'a' in dirstate
1094 b'a' in dirstate
1095
1095
1096 def d():
1096 def d():
1097 dirstate.hasdir(b'a')
1097 dirstate.hasdir(b'a')
1098 del dirstate._map._dirs
1098 del dirstate._map._dirs
1099
1099
1100 timer(d)
1100 timer(d)
1101 fm.end()
1101 fm.end()
1102
1102
1103
1103
1104 @command(b'perfdirstate', formatteropts)
1104 @command(b'perfdirstate', [
1105 (b'', b'iteration', None,
1106 b'benchmark a full iteration for the dirstate'),
1107 ] + formatteropts)
1105 def perfdirstate(ui, repo, **opts):
1108 def perfdirstate(ui, repo, **opts):
1106 """benchmap the time necessary to load a dirstate from scratch
1109 """benchmap the time of various distate operations
1107
1110
1111 By default benchmark the time necessary to load a dirstate from scratch.
1108 The dirstate is loaded to the point were a "contains" request can be
1112 The dirstate is loaded to the point were a "contains" request can be
1109 answered.
1113 answered.
1110 """
1114 """
1111 opts = _byteskwargs(opts)
1115 opts = _byteskwargs(opts)
1112 timer, fm = gettimer(ui, opts)
1116 timer, fm = gettimer(ui, opts)
1113 b"a" in repo.dirstate
1117 b"a" in repo.dirstate
1114
1118
1119 if opts[b'iteration']:
1120 setup = None
1121 dirstate = repo.dirstate
1122 def d():
1123 for f in dirstate:
1124 pass
1125 else:
1115 def setup():
1126 def setup():
1116 repo.dirstate.invalidate()
1127 repo.dirstate.invalidate()
1117
1128
1118 def d():
1129 def d():
1119 b"a" in repo.dirstate
1130 b"a" in repo.dirstate
1120
1131
1121 timer(d, setup=setup)
1132 timer(d, setup=setup)
1122 fm.end()
1133 fm.end()
1123
1134
1124
1135
1125 @command(b'perfdirstatedirs', formatteropts)
1136 @command(b'perfdirstatedirs', formatteropts)
1126 def perfdirstatedirs(ui, repo, **opts):
1137 def perfdirstatedirs(ui, repo, **opts):
1127 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
1138 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
1128 """
1139 """
1129 opts = _byteskwargs(opts)
1140 opts = _byteskwargs(opts)
1130 timer, fm = gettimer(ui, opts)
1141 timer, fm = gettimer(ui, opts)
1131 repo.dirstate.hasdir(b"a")
1142 repo.dirstate.hasdir(b"a")
1132
1143
1133 def setup():
1144 def setup():
1134 del repo.dirstate._map._dirs
1145 del repo.dirstate._map._dirs
1135
1146
1136 def d():
1147 def d():
1137 repo.dirstate.hasdir(b"a")
1148 repo.dirstate.hasdir(b"a")
1138
1149
1139 timer(d, setup=setup)
1150 timer(d, setup=setup)
1140 fm.end()
1151 fm.end()
1141
1152
1142
1153
1143 @command(b'perfdirstatefoldmap', formatteropts)
1154 @command(b'perfdirstatefoldmap', formatteropts)
1144 def perfdirstatefoldmap(ui, repo, **opts):
1155 def perfdirstatefoldmap(ui, repo, **opts):
1145 """benchmap a `dirstate._map.filefoldmap.get()` request
1156 """benchmap a `dirstate._map.filefoldmap.get()` request
1146
1157
1147 The dirstate filefoldmap cache is dropped between every request.
1158 The dirstate filefoldmap cache is dropped between every request.
1148 """
1159 """
1149 opts = _byteskwargs(opts)
1160 opts = _byteskwargs(opts)
1150 timer, fm = gettimer(ui, opts)
1161 timer, fm = gettimer(ui, opts)
1151 dirstate = repo.dirstate
1162 dirstate = repo.dirstate
1152 dirstate._map.filefoldmap.get(b'a')
1163 dirstate._map.filefoldmap.get(b'a')
1153
1164
1154 def setup():
1165 def setup():
1155 del dirstate._map.filefoldmap
1166 del dirstate._map.filefoldmap
1156
1167
1157 def d():
1168 def d():
1158 dirstate._map.filefoldmap.get(b'a')
1169 dirstate._map.filefoldmap.get(b'a')
1159
1170
1160 timer(d, setup=setup)
1171 timer(d, setup=setup)
1161 fm.end()
1172 fm.end()
1162
1173
1163
1174
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # warm the dirstate once so only the map access itself is timed
    ds._map.dirfoldmap.get(b'a')

    def dropcache():
        # force the fold map (and the `_dirs` set it is derived from) to be
        # rebuilt from scratch on the next access
        del ds._map.dirfoldmap
        del ds._map._dirs

    def run():
        ds._map.dirfoldmap.get(b'a')

    timer(run, setup=dropcache)
    fm.end()
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmap the time it take to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate is loaded before any timing happens
    b"a" in dirstate

    def markdirty():
        # flag the dirstate as modified so write() actually serializes it
        dirstate._dirty = True

    def run():
        dirstate.write(repo.currenttransaction())

    timer(run, setup=markdirty)
    fm.end()
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the runtime of `merge.calculateupdates`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints up front so the lookup is not part of the timing
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(ctx1, ctx2)

    timer(run)
    fm.end()
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # also drop the filecache entry so the phaseroots file is re-read
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve the destination like a push would (default-push, then default)
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase data once; only the local analysis is benchmarked
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count the remote roots that are known locally and non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV is a changeset revision: go through it to find the manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a 40-character argument is a full hex manifest nodeid
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # older Mercurial exposes the manifest revlog directly
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop in-memory (and optionally on-disk) caches before each read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def resetcaches():
        # drop both the in-memory dirstate and its cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        # accessing the property rebuilds the ignore matcher
        dirstate._ignore

    timer(loadignore, setup=resetcaches, title=b"load")
    fm.end()
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    # resolve the requested revisions to nodes before any timing happens
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # recreate the changelog (index) from scratch, then do the lookups
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild a cold nodemap before every timed run

        def setup():
            setnodeget()

    else:
        # build the nodemap once and benchmark warm lookups
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name == r'nt':
            # no /dev/null on Windows, and HGRCPATH has to be passed via the
            # environment rather than inline on the command line
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )

    timer(run)
    fm.end()
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the first `count` revisions to nodes before timing
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodes:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a change context"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # entry [3] of a raw changelog revision is the list of touched files
        len(cl.read(rev)[3])

    timer(run)
    fm.end()
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run (and every timed iteration) replays the exact
    # same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-generate the edits: (rev, a1, a2, b1, b2) hunks that stay within
    # the file's current line count, tracking how the count evolves
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # only the replay of the edits is timed, not their generation
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup out of the timed call
    revrange = scmutil.revrange

    def run():
        return len(revrange(repo, specs))

    timer(run)
    fm.end()
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node -> rev lookup in the changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(node)
        # wipe the revlog caches so the next iteration starts cold again
        clearcaches(cl)

    timer(run)
    fm.end()
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` run (its output is swallowed)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    # buffer the log output so printing does not dominate the timing
    ui.pushbuffer()
    timer(run)
    ui.popbuffer()
    fm.end()
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template

    Renders the template (default: a short one-line log template) for every
    requested revision, discarding the output, and reports the timing.
    """
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to /dev/null so only the
    # templating work is measured, not terminal I/O
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    try:
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # the devnull handle was previously leaked; always release it
        nullui.fout.close()
1812
1823
1813
1824
1814 def _displaystats(ui, opts, entries, data):
1825 def _displaystats(ui, opts, entries, data):
1815 pass
1826 pass
1816 # use a second formatter because the data are quite different, not sure
1827 # use a second formatter because the data are quite different, not sure
1817 # how it flies with the templater.
1828 # how it flies with the templater.
1818 fm = ui.formatter(b'perf-stats', opts)
1829 fm = ui.formatter(b'perf-stats', opts)
1819 for key, title in entries:
1830 for key, title in entries:
1820 values = data[key]
1831 values = data[key]
1821 nbvalues = len(data)
1832 nbvalues = len(data)
1822 values.sort()
1833 values.sort()
1823 stats = {
1834 stats = {
1824 'key': key,
1835 'key': key,
1825 'title': title,
1836 'title': title,
1826 'nbitems': len(values),
1837 'nbitems': len(values),
1827 'min': values[0][0],
1838 'min': values[0][0],
1828 '10%': values[(nbvalues * 10) // 100][0],
1839 '10%': values[(nbvalues * 10) // 100][0],
1829 '25%': values[(nbvalues * 25) // 100][0],
1840 '25%': values[(nbvalues * 25) // 100][0],
1830 '50%': values[(nbvalues * 50) // 100][0],
1841 '50%': values[(nbvalues * 50) // 100][0],
1831 '75%': values[(nbvalues * 75) // 100][0],
1842 '75%': values[(nbvalues * 75) // 100][0],
1832 '80%': values[(nbvalues * 80) // 100][0],
1843 '80%': values[(nbvalues * 80) // 100][0],
1833 '85%': values[(nbvalues * 85) // 100][0],
1844 '85%': values[(nbvalues * 85) // 100][0],
1834 '90%': values[(nbvalues * 90) // 100][0],
1845 '90%': values[(nbvalues * 90) // 100][0],
1835 '95%': values[(nbvalues * 95) // 100][0],
1846 '95%': values[(nbvalues * 95) // 100][0],
1836 '99%': values[(nbvalues * 99) // 100][0],
1847 '99%': values[(nbvalues * 99) // 100][0],
1837 'max': values[-1][0],
1848 'max': values[-1][0],
1838 }
1849 }
1839 fm.startitem()
1850 fm.startitem()
1840 fm.data(**stats)
1851 fm.data(**stats)
1841 # make node pretty for the human output
1852 # make node pretty for the human output
1842 fm.plain('### %s (%d items)\n' % (title, len(values)))
1853 fm.plain('### %s (%d items)\n' % (title, len(values)))
1843 lines = [
1854 lines = [
1844 'min',
1855 'min',
1845 '10%',
1856 '10%',
1846 '25%',
1857 '25%',
1847 '50%',
1858 '50%',
1848 '75%',
1859 '75%',
1849 '80%',
1860 '80%',
1850 '85%',
1861 '85%',
1851 '90%',
1862 '90%',
1852 '95%',
1863 '95%',
1853 '99%',
1864 '99%',
1854 'max',
1865 'max',
1855 ]
1866 ]
1856 for l in lines:
1867 for l in lines:
1857 fm.plain('%s: %s\n' % (l, stats[l]))
1868 fm.plain('%s: %s\n' % (l, stats[l]))
1858 fm.end()
1869 fm.end()
1859
1870
1860
1871
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing/rename columns only exist when --timing was requested
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions within the requested set are relevant
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # BUG FIX: read the clock *before* computing the delta;
                # previously `end` still held the p1 measurement's value,
                # so p2.time was garbage (possibly negative)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2042
2053
2043
2054
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the timing mode adds two extra columns (nb-renames, time) to both
    # the header and the per-row output format
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the percentile summary printed at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions are interesting: each yields (base, parent)
    # pairs on which copy tracing is exercised
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace between this pair; skip it
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2169
2180
2170
2181
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2177
2188
2178
2189
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2190
2201
2191
2202
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # writing requires the store lock and an open transaction; the
    # transaction also backs up the fncache so the benchmark is harmless
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        # force the dirty flag each run so write() actually rewrites the
        # file instead of short-circuiting on a clean cache
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
2210
2221
2211
2222
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2225
2236
2226
2237
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker-thread body used by `perfbdiff --threads`

    Pulls (text1, text2) pairs from queue `q` and diffs each one, using
    the diff flavor selected by `xdiff`/`blocks`.  A None item on the
    queue marks the end of one timed round: the worker acknowledges it
    and then blocks on the `ready` condition until the coordinator wakes
    everyone for the next round.  The `done` event tells workers to exit
    for good.
    """
    while not done.is_set():
        pair = q.get()
        # inner loop: drain pairs until the None end-of-round sentinel
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        # wait here so all workers finish the round together before the
        # coordinator starts the next timed run
        with ready:
            ready.wait()
2242
2253
2243
2254
def _manifestrevision(repo, mnode):
    """return the raw revision text for manifest node `mnode`

    Supports both the modern manifestlog API (``getstorage``) and the
    legacy one (``_revlog``).
    """
    manifestlog = repo.manifestlog
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog
    return storage.revision(mnode)
2253
2264
2254
2265
2255 @command(
2266 @command(
2256 b'perfbdiff',
2267 b'perfbdiff',
2257 revlogopts
2268 revlogopts
2258 + formatteropts
2269 + formatteropts
2259 + [
2270 + [
2260 (
2271 (
2261 b'',
2272 b'',
2262 b'count',
2273 b'count',
2263 1,
2274 1,
2264 b'number of revisions to test (when using --startrev)',
2275 b'number of revisions to test (when using --startrev)',
2265 ),
2276 ),
2266 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2277 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2267 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2278 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2268 (b'', b'blocks', False, b'test computing diffs into blocks'),
2279 (b'', b'blocks', False, b'test computing diffs into blocks'),
2269 (b'', b'xdiff', False, b'use xdiff algorithm'),
2280 (b'', b'xdiff', False, b'use xdiff algorithm'),
2270 ],
2281 ],
2271 b'-c|-m|FILE REV',
2282 b'-c|-m|FILE REV',
2272 )
2283 )
2273 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2284 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2274 """benchmark a bdiff between revisions
2285 """benchmark a bdiff between revisions
2275
2286
2276 By default, benchmark a bdiff between its delta parent and itself.
2287 By default, benchmark a bdiff between its delta parent and itself.
2277
2288
2278 With ``--count``, benchmark bdiffs between delta parents and self for N
2289 With ``--count``, benchmark bdiffs between delta parents and self for N
2279 revisions starting at the specified revision.
2290 revisions starting at the specified revision.
2280
2291
2281 With ``--alldata``, assume the requested revision is a changeset and
2292 With ``--alldata``, assume the requested revision is a changeset and
2282 measure bdiffs for all changes related to that changeset (manifest
2293 measure bdiffs for all changes related to that changeset (manifest
2283 and filelogs).
2294 and filelogs).
2284 """
2295 """
2285 opts = _byteskwargs(opts)
2296 opts = _byteskwargs(opts)
2286
2297
2287 if opts[b'xdiff'] and not opts[b'blocks']:
2298 if opts[b'xdiff'] and not opts[b'blocks']:
2288 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2299 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2289
2300
2290 if opts[b'alldata']:
2301 if opts[b'alldata']:
2291 opts[b'changelog'] = True
2302 opts[b'changelog'] = True
2292
2303
2293 if opts.get(b'changelog') or opts.get(b'manifest'):
2304 if opts.get(b'changelog') or opts.get(b'manifest'):
2294 file_, rev = None, file_
2305 file_, rev = None, file_
2295 elif rev is None:
2306 elif rev is None:
2296 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2307 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2297
2308
2298 blocks = opts[b'blocks']
2309 blocks = opts[b'blocks']
2299 xdiff = opts[b'xdiff']
2310 xdiff = opts[b'xdiff']
2300 textpairs = []
2311 textpairs = []
2301
2312
2302 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2313 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2303
2314
2304 startrev = r.rev(r.lookup(rev))
2315 startrev = r.rev(r.lookup(rev))
2305 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2316 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2306 if opts[b'alldata']:
2317 if opts[b'alldata']:
2307 # Load revisions associated with changeset.
2318 # Load revisions associated with changeset.
2308 ctx = repo[rev]
2319 ctx = repo[rev]
2309 mtext = _manifestrevision(repo, ctx.manifestnode())
2320 mtext = _manifestrevision(repo, ctx.manifestnode())
2310 for pctx in ctx.parents():
2321 for pctx in ctx.parents():
2311 pman = _manifestrevision(repo, pctx.manifestnode())
2322 pman = _manifestrevision(repo, pctx.manifestnode())
2312 textpairs.append((pman, mtext))
2323 textpairs.append((pman, mtext))
2313
2324
2314 # Load filelog revisions by iterating manifest delta.
2325 # Load filelog revisions by iterating manifest delta.
2315 man = ctx.manifest()
2326 man = ctx.manifest()
2316 pman = ctx.p1().manifest()
2327 pman = ctx.p1().manifest()
2317 for filename, change in pman.diff(man).items():
2328 for filename, change in pman.diff(man).items():
2318 fctx = repo.file(filename)
2329 fctx = repo.file(filename)
2319 f1 = fctx.revision(change[0][0] or -1)
2330 f1 = fctx.revision(change[0][0] or -1)
2320 f2 = fctx.revision(change[1][0] or -1)
2331 f2 = fctx.revision(change[1][0] or -1)
2321 textpairs.append((f1, f2))
2332 textpairs.append((f1, f2))
2322 else:
2333 else:
2323 dp = r.deltaparent(rev)
2334 dp = r.deltaparent(rev)
2324 textpairs.append((r.revision(dp), r.revision(rev)))
2335 textpairs.append((r.revision(dp), r.revision(rev)))
2325
2336
2326 withthreads = threads > 0
2337 withthreads = threads > 0
2327 if not withthreads:
2338 if not withthreads:
2328
2339
2329 def d():
2340 def d():
2330 for pair in textpairs:
2341 for pair in textpairs:
2331 if xdiff:
2342 if xdiff:
2332 mdiff.bdiff.xdiffblocks(*pair)
2343 mdiff.bdiff.xdiffblocks(*pair)
2333 elif blocks:
2344 elif blocks:
2334 mdiff.bdiff.blocks(*pair)
2345 mdiff.bdiff.blocks(*pair)
2335 else:
2346 else:
2336 mdiff.textdiff(*pair)
2347 mdiff.textdiff(*pair)
2337
2348
2338 else:
2349 else:
2339 q = queue()
2350 q = queue()
2340 for i in _xrange(threads):
2351 for i in _xrange(threads):
2341 q.put(None)
2352 q.put(None)
2342 ready = threading.Condition()
2353 ready = threading.Condition()
2343 done = threading.Event()
2354 done = threading.Event()
2344 for i in _xrange(threads):
2355 for i in _xrange(threads):
2345 threading.Thread(
2356 threading.Thread(
2346 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2357 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2347 ).start()
2358 ).start()
2348 q.join()
2359 q.join()
2349
2360
2350 def d():
2361 def d():
2351 for pair in textpairs:
2362 for pair in textpairs:
2352 q.put(pair)
2363 q.put(pair)
2353 for i in _xrange(threads):
2364 for i in _xrange(threads):
2354 q.put(None)
2365 q.put(None)
2355 with ready:
2366 with ready:
2356 ready.notify_all()
2367 ready.notify_all()
2357 q.join()
2368 q.join()
2358
2369
2359 timer, fm = gettimer(ui, opts)
2370 timer, fm = gettimer(ui, opts)
2360 timer(d)
2371 timer(d)
2361 fm.end()
2372 fm.end()
2362
2373
2363 if withthreads:
2374 if withthreads:
2364 done.set()
2375 done.set()
2365 for i in _xrange(threads):
2376 for i in _xrange(threads):
2366 q.put(None)
2377 q.put(None)
2367 with ready:
2378 with ready:
2368 ready.notify_all()
2379 ready.notify_all()
2369
2380
2370
2381
2371 @command(
2382 @command(
2372 b'perfunidiff',
2383 b'perfunidiff',
2373 revlogopts
2384 revlogopts
2374 + formatteropts
2385 + formatteropts
2375 + [
2386 + [
2376 (
2387 (
2377 b'',
2388 b'',
2378 b'count',
2389 b'count',
2379 1,
2390 1,
2380 b'number of revisions to test (when using --startrev)',
2391 b'number of revisions to test (when using --startrev)',
2381 ),
2392 ),
2382 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2393 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2383 ],
2394 ],
2384 b'-c|-m|FILE REV',
2395 b'-c|-m|FILE REV',
2385 )
2396 )
2386 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2397 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2387 """benchmark a unified diff between revisions
2398 """benchmark a unified diff between revisions
2388
2399
2389 This doesn't include any copy tracing - it's just a unified diff
2400 This doesn't include any copy tracing - it's just a unified diff
2390 of the texts.
2401 of the texts.
2391
2402
2392 By default, benchmark a diff between its delta parent and itself.
2403 By default, benchmark a diff between its delta parent and itself.
2393
2404
2394 With ``--count``, benchmark diffs between delta parents and self for N
2405 With ``--count``, benchmark diffs between delta parents and self for N
2395 revisions starting at the specified revision.
2406 revisions starting at the specified revision.
2396
2407
2397 With ``--alldata``, assume the requested revision is a changeset and
2408 With ``--alldata``, assume the requested revision is a changeset and
2398 measure diffs for all changes related to that changeset (manifest
2409 measure diffs for all changes related to that changeset (manifest
2399 and filelogs).
2410 and filelogs).
2400 """
2411 """
2401 opts = _byteskwargs(opts)
2412 opts = _byteskwargs(opts)
2402 if opts[b'alldata']:
2413 if opts[b'alldata']:
2403 opts[b'changelog'] = True
2414 opts[b'changelog'] = True
2404
2415
2405 if opts.get(b'changelog') or opts.get(b'manifest'):
2416 if opts.get(b'changelog') or opts.get(b'manifest'):
2406 file_, rev = None, file_
2417 file_, rev = None, file_
2407 elif rev is None:
2418 elif rev is None:
2408 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2419 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2409
2420
2410 textpairs = []
2421 textpairs = []
2411
2422
2412 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2423 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2413
2424
2414 startrev = r.rev(r.lookup(rev))
2425 startrev = r.rev(r.lookup(rev))
2415 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2426 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2416 if opts[b'alldata']:
2427 if opts[b'alldata']:
2417 # Load revisions associated with changeset.
2428 # Load revisions associated with changeset.
2418 ctx = repo[rev]
2429 ctx = repo[rev]
2419 mtext = _manifestrevision(repo, ctx.manifestnode())
2430 mtext = _manifestrevision(repo, ctx.manifestnode())
2420 for pctx in ctx.parents():
2431 for pctx in ctx.parents():
2421 pman = _manifestrevision(repo, pctx.manifestnode())
2432 pman = _manifestrevision(repo, pctx.manifestnode())
2422 textpairs.append((pman, mtext))
2433 textpairs.append((pman, mtext))
2423
2434
2424 # Load filelog revisions by iterating manifest delta.
2435 # Load filelog revisions by iterating manifest delta.
2425 man = ctx.manifest()
2436 man = ctx.manifest()
2426 pman = ctx.p1().manifest()
2437 pman = ctx.p1().manifest()
2427 for filename, change in pman.diff(man).items():
2438 for filename, change in pman.diff(man).items():
2428 fctx = repo.file(filename)
2439 fctx = repo.file(filename)
2429 f1 = fctx.revision(change[0][0] or -1)
2440 f1 = fctx.revision(change[0][0] or -1)
2430 f2 = fctx.revision(change[1][0] or -1)
2441 f2 = fctx.revision(change[1][0] or -1)
2431 textpairs.append((f1, f2))
2442 textpairs.append((f1, f2))
2432 else:
2443 else:
2433 dp = r.deltaparent(rev)
2444 dp = r.deltaparent(rev)
2434 textpairs.append((r.revision(dp), r.revision(rev)))
2445 textpairs.append((r.revision(dp), r.revision(rev)))
2435
2446
2436 def d():
2447 def d():
2437 for left, right in textpairs:
2448 for left, right in textpairs:
2438 # The date strings don't matter, so we pass empty strings.
2449 # The date strings don't matter, so we pass empty strings.
2439 headerlines, hunks = mdiff.unidiff(
2450 headerlines, hunks = mdiff.unidiff(
2440 left, b'', right, b'', b'left', b'right', binary=False
2451 left, b'', right, b'', b'left', b'right', binary=False
2441 )
2452 )
2442 # consume iterators in roughly the way patch.py does
2453 # consume iterators in roughly the way patch.py does
2443 b'\n'.join(headerlines)
2454 b'\n'.join(headerlines)
2444 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2455 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2445
2456
2446 timer, fm = gettimer(ui, opts)
2457 timer, fm = gettimer(ui, opts)
2447 timer(d)
2458 timer(d)
2448 fm.end()
2459 fm.end()
2449
2460
2450
2461
2451 @command(b'perfdiffwd', formatteropts)
2462 @command(b'perfdiffwd', formatteropts)
2452 def perfdiffwd(ui, repo, **opts):
2463 def perfdiffwd(ui, repo, **opts):
2453 """Profile diff of working directory changes"""
2464 """Profile diff of working directory changes"""
2454 opts = _byteskwargs(opts)
2465 opts = _byteskwargs(opts)
2455 timer, fm = gettimer(ui, opts)
2466 timer, fm = gettimer(ui, opts)
2456 options = {
2467 options = {
2457 'w': 'ignore_all_space',
2468 'w': 'ignore_all_space',
2458 'b': 'ignore_space_change',
2469 'b': 'ignore_space_change',
2459 'B': 'ignore_blank_lines',
2470 'B': 'ignore_blank_lines',
2460 }
2471 }
2461
2472
2462 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2473 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2463 opts = dict((options[c], b'1') for c in diffopt)
2474 opts = dict((options[c], b'1') for c in diffopt)
2464
2475
2465 def d():
2476 def d():
2466 ui.pushbuffer()
2477 ui.pushbuffer()
2467 commands.diff(ui, repo, **opts)
2478 commands.diff(ui, repo, **opts)
2468 ui.popbuffer()
2479 ui.popbuffer()
2469
2480
2470 diffopt = diffopt.encode('ascii')
2481 diffopt = diffopt.encode('ascii')
2471 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2482 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2472 timer(d, title=title)
2483 timer(d, title=title)
2473 fm.end()
2484 fm.end()
2474
2485
2475
2486
2476 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2487 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2477 def perfrevlogindex(ui, repo, file_=None, **opts):
2488 def perfrevlogindex(ui, repo, file_=None, **opts):
2478 """Benchmark operations against a revlog index.
2489 """Benchmark operations against a revlog index.
2479
2490
2480 This tests constructing a revlog instance, reading index data,
2491 This tests constructing a revlog instance, reading index data,
2481 parsing index data, and performing various operations related to
2492 parsing index data, and performing various operations related to
2482 index data.
2493 index data.
2483 """
2494 """
2484
2495
2485 opts = _byteskwargs(opts)
2496 opts = _byteskwargs(opts)
2486
2497
2487 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2498 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2488
2499
2489 opener = getattr(rl, 'opener') # trick linter
2500 opener = getattr(rl, 'opener') # trick linter
2490 indexfile = rl.indexfile
2501 indexfile = rl.indexfile
2491 data = opener.read(indexfile)
2502 data = opener.read(indexfile)
2492
2503
2493 header = struct.unpack(b'>I', data[0:4])[0]
2504 header = struct.unpack(b'>I', data[0:4])[0]
2494 version = header & 0xFFFF
2505 version = header & 0xFFFF
2495 if version == 1:
2506 if version == 1:
2496 revlogio = revlog.revlogio()
2507 revlogio = revlog.revlogio()
2497 inline = header & (1 << 16)
2508 inline = header & (1 << 16)
2498 else:
2509 else:
2499 raise error.Abort(b'unsupported revlog version: %d' % version)
2510 raise error.Abort(b'unsupported revlog version: %d' % version)
2500
2511
2501 rllen = len(rl)
2512 rllen = len(rl)
2502
2513
2503 node0 = rl.node(0)
2514 node0 = rl.node(0)
2504 node25 = rl.node(rllen // 4)
2515 node25 = rl.node(rllen // 4)
2505 node50 = rl.node(rllen // 2)
2516 node50 = rl.node(rllen // 2)
2506 node75 = rl.node(rllen // 4 * 3)
2517 node75 = rl.node(rllen // 4 * 3)
2507 node100 = rl.node(rllen - 1)
2518 node100 = rl.node(rllen - 1)
2508
2519
2509 allrevs = range(rllen)
2520 allrevs = range(rllen)
2510 allrevsrev = list(reversed(allrevs))
2521 allrevsrev = list(reversed(allrevs))
2511 allnodes = [rl.node(rev) for rev in range(rllen)]
2522 allnodes = [rl.node(rev) for rev in range(rllen)]
2512 allnodesrev = list(reversed(allnodes))
2523 allnodesrev = list(reversed(allnodes))
2513
2524
2514 def constructor():
2525 def constructor():
2515 revlog.revlog(opener, indexfile)
2526 revlog.revlog(opener, indexfile)
2516
2527
2517 def read():
2528 def read():
2518 with opener(indexfile) as fh:
2529 with opener(indexfile) as fh:
2519 fh.read()
2530 fh.read()
2520
2531
2521 def parseindex():
2532 def parseindex():
2522 revlogio.parseindex(data, inline)
2533 revlogio.parseindex(data, inline)
2523
2534
2524 def getentry(revornode):
2535 def getentry(revornode):
2525 index = revlogio.parseindex(data, inline)[0]
2536 index = revlogio.parseindex(data, inline)[0]
2526 index[revornode]
2537 index[revornode]
2527
2538
2528 def getentries(revs, count=1):
2539 def getentries(revs, count=1):
2529 index = revlogio.parseindex(data, inline)[0]
2540 index = revlogio.parseindex(data, inline)[0]
2530
2541
2531 for i in range(count):
2542 for i in range(count):
2532 for rev in revs:
2543 for rev in revs:
2533 index[rev]
2544 index[rev]
2534
2545
2535 def resolvenode(node):
2546 def resolvenode(node):
2536 nodemap = revlogio.parseindex(data, inline)[1]
2547 nodemap = revlogio.parseindex(data, inline)[1]
2537 # This only works for the C code.
2548 # This only works for the C code.
2538 if nodemap is None:
2549 if nodemap is None:
2539 return
2550 return
2540
2551
2541 try:
2552 try:
2542 nodemap[node]
2553 nodemap[node]
2543 except error.RevlogError:
2554 except error.RevlogError:
2544 pass
2555 pass
2545
2556
2546 def resolvenodes(nodes, count=1):
2557 def resolvenodes(nodes, count=1):
2547 nodemap = revlogio.parseindex(data, inline)[1]
2558 nodemap = revlogio.parseindex(data, inline)[1]
2548 if nodemap is None:
2559 if nodemap is None:
2549 return
2560 return
2550
2561
2551 for i in range(count):
2562 for i in range(count):
2552 for node in nodes:
2563 for node in nodes:
2553 try:
2564 try:
2554 nodemap[node]
2565 nodemap[node]
2555 except error.RevlogError:
2566 except error.RevlogError:
2556 pass
2567 pass
2557
2568
2558 benches = [
2569 benches = [
2559 (constructor, b'revlog constructor'),
2570 (constructor, b'revlog constructor'),
2560 (read, b'read'),
2571 (read, b'read'),
2561 (parseindex, b'create index object'),
2572 (parseindex, b'create index object'),
2562 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2573 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2563 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2574 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2564 (lambda: resolvenode(node0), b'look up node at rev 0'),
2575 (lambda: resolvenode(node0), b'look up node at rev 0'),
2565 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2576 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2566 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2577 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2567 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2578 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2568 (lambda: resolvenode(node100), b'look up node at tip'),
2579 (lambda: resolvenode(node100), b'look up node at tip'),
2569 # 2x variation is to measure caching impact.
2580 # 2x variation is to measure caching impact.
2570 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2581 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2571 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2582 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2572 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2583 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2573 (
2584 (
2574 lambda: resolvenodes(allnodesrev, 2),
2585 lambda: resolvenodes(allnodesrev, 2),
2575 b'look up all nodes 2x (reverse)',
2586 b'look up all nodes 2x (reverse)',
2576 ),
2587 ),
2577 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2588 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2578 (
2589 (
2579 lambda: getentries(allrevs, 2),
2590 lambda: getentries(allrevs, 2),
2580 b'retrieve all index entries 2x (forward)',
2591 b'retrieve all index entries 2x (forward)',
2581 ),
2592 ),
2582 (
2593 (
2583 lambda: getentries(allrevsrev),
2594 lambda: getentries(allrevsrev),
2584 b'retrieve all index entries (reverse)',
2595 b'retrieve all index entries (reverse)',
2585 ),
2596 ),
2586 (
2597 (
2587 lambda: getentries(allrevsrev, 2),
2598 lambda: getentries(allrevsrev, 2),
2588 b'retrieve all index entries 2x (reverse)',
2599 b'retrieve all index entries 2x (reverse)',
2589 ),
2600 ),
2590 ]
2601 ]
2591
2602
2592 for fn, title in benches:
2603 for fn, title in benches:
2593 timer, fm = gettimer(ui, opts)
2604 timer, fm = gettimer(ui, opts)
2594 timer(fn, title=title)
2605 timer(fn, title=title)
2595 fm.end()
2606 fm.end()
2596
2607
2597
2608
2598 @command(
2609 @command(
2599 b'perfrevlogrevisions',
2610 b'perfrevlogrevisions',
2600 revlogopts
2611 revlogopts
2601 + formatteropts
2612 + formatteropts
2602 + [
2613 + [
2603 (b'd', b'dist', 100, b'distance between the revisions'),
2614 (b'd', b'dist', 100, b'distance between the revisions'),
2604 (b's', b'startrev', 0, b'revision to start reading at'),
2615 (b's', b'startrev', 0, b'revision to start reading at'),
2605 (b'', b'reverse', False, b'read in reverse'),
2616 (b'', b'reverse', False, b'read in reverse'),
2606 ],
2617 ],
2607 b'-c|-m|FILE',
2618 b'-c|-m|FILE',
2608 )
2619 )
2609 def perfrevlogrevisions(
2620 def perfrevlogrevisions(
2610 ui, repo, file_=None, startrev=0, reverse=False, **opts
2621 ui, repo, file_=None, startrev=0, reverse=False, **opts
2611 ):
2622 ):
2612 """Benchmark reading a series of revisions from a revlog.
2623 """Benchmark reading a series of revisions from a revlog.
2613
2624
2614 By default, we read every ``-d/--dist`` revision from 0 to tip of
2625 By default, we read every ``-d/--dist`` revision from 0 to tip of
2615 the specified revlog.
2626 the specified revlog.
2616
2627
2617 The start revision can be defined via ``-s/--startrev``.
2628 The start revision can be defined via ``-s/--startrev``.
2618 """
2629 """
2619 opts = _byteskwargs(opts)
2630 opts = _byteskwargs(opts)
2620
2631
2621 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2632 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2622 rllen = getlen(ui)(rl)
2633 rllen = getlen(ui)(rl)
2623
2634
2624 if startrev < 0:
2635 if startrev < 0:
2625 startrev = rllen + startrev
2636 startrev = rllen + startrev
2626
2637
2627 def d():
2638 def d():
2628 rl.clearcaches()
2639 rl.clearcaches()
2629
2640
2630 beginrev = startrev
2641 beginrev = startrev
2631 endrev = rllen
2642 endrev = rllen
2632 dist = opts[b'dist']
2643 dist = opts[b'dist']
2633
2644
2634 if reverse:
2645 if reverse:
2635 beginrev, endrev = endrev - 1, beginrev - 1
2646 beginrev, endrev = endrev - 1, beginrev - 1
2636 dist = -1 * dist
2647 dist = -1 * dist
2637
2648
2638 for x in _xrange(beginrev, endrev, dist):
2649 for x in _xrange(beginrev, endrev, dist):
2639 # Old revisions don't support passing int.
2650 # Old revisions don't support passing int.
2640 n = rl.node(x)
2651 n = rl.node(x)
2641 rl.revision(n)
2652 rl.revision(n)
2642
2653
2643 timer, fm = gettimer(ui, opts)
2654 timer, fm = gettimer(ui, opts)
2644 timer(d)
2655 timer(d)
2645 fm.end()
2656 fm.end()
2646
2657
2647
2658
2648 @command(
2659 @command(
2649 b'perfrevlogwrite',
2660 b'perfrevlogwrite',
2650 revlogopts
2661 revlogopts
2651 + formatteropts
2662 + formatteropts
2652 + [
2663 + [
2653 (b's', b'startrev', 1000, b'revision to start writing at'),
2664 (b's', b'startrev', 1000, b'revision to start writing at'),
2654 (b'', b'stoprev', -1, b'last revision to write'),
2665 (b'', b'stoprev', -1, b'last revision to write'),
2655 (b'', b'count', 3, b'number of passes to perform'),
2666 (b'', b'count', 3, b'number of passes to perform'),
2656 (b'', b'details', False, b'print timing for every revisions tested'),
2667 (b'', b'details', False, b'print timing for every revisions tested'),
2657 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2668 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2658 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2669 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2659 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2670 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2660 ],
2671 ],
2661 b'-c|-m|FILE',
2672 b'-c|-m|FILE',
2662 )
2673 )
2663 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2674 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2664 """Benchmark writing a series of revisions to a revlog.
2675 """Benchmark writing a series of revisions to a revlog.
2665
2676
2666 Possible source values are:
2677 Possible source values are:
2667 * `full`: add from a full text (default).
2678 * `full`: add from a full text (default).
2668 * `parent-1`: add from a delta to the first parent
2679 * `parent-1`: add from a delta to the first parent
2669 * `parent-2`: add from a delta to the second parent if it exists
2680 * `parent-2`: add from a delta to the second parent if it exists
2670 (use a delta from the first parent otherwise)
2681 (use a delta from the first parent otherwise)
2671 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2682 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2672 * `storage`: add from the existing precomputed deltas
2683 * `storage`: add from the existing precomputed deltas
2673
2684
2674 Note: This performance command measures performance in a custom way. As a
2685 Note: This performance command measures performance in a custom way. As a
2675 result some of the global configuration of the 'perf' command does not
2686 result some of the global configuration of the 'perf' command does not
2676 apply to it:
2687 apply to it:
2677
2688
2678 * ``pre-run``: disabled
2689 * ``pre-run``: disabled
2679
2690
2680 * ``profile-benchmark``: disabled
2691 * ``profile-benchmark``: disabled
2681
2692
2682 * ``run-limits``: disabled use --count instead
2693 * ``run-limits``: disabled use --count instead
2683 """
2694 """
2684 opts = _byteskwargs(opts)
2695 opts = _byteskwargs(opts)
2685
2696
2686 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2697 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2687 rllen = getlen(ui)(rl)
2698 rllen = getlen(ui)(rl)
2688 if startrev < 0:
2699 if startrev < 0:
2689 startrev = rllen + startrev
2700 startrev = rllen + startrev
2690 if stoprev < 0:
2701 if stoprev < 0:
2691 stoprev = rllen + stoprev
2702 stoprev = rllen + stoprev
2692
2703
2693 lazydeltabase = opts['lazydeltabase']
2704 lazydeltabase = opts['lazydeltabase']
2694 source = opts['source']
2705 source = opts['source']
2695 clearcaches = opts['clear_caches']
2706 clearcaches = opts['clear_caches']
2696 validsource = (
2707 validsource = (
2697 b'full',
2708 b'full',
2698 b'parent-1',
2709 b'parent-1',
2699 b'parent-2',
2710 b'parent-2',
2700 b'parent-smallest',
2711 b'parent-smallest',
2701 b'storage',
2712 b'storage',
2702 )
2713 )
2703 if source not in validsource:
2714 if source not in validsource:
2704 raise error.Abort('invalid source type: %s' % source)
2715 raise error.Abort('invalid source type: %s' % source)
2705
2716
2706 ### actually gather results
2717 ### actually gather results
2707 count = opts['count']
2718 count = opts['count']
2708 if count <= 0:
2719 if count <= 0:
2709 raise error.Abort('invalide run count: %d' % count)
2720 raise error.Abort('invalide run count: %d' % count)
2710 allresults = []
2721 allresults = []
2711 for c in range(count):
2722 for c in range(count):
2712 timing = _timeonewrite(
2723 timing = _timeonewrite(
2713 ui,
2724 ui,
2714 rl,
2725 rl,
2715 source,
2726 source,
2716 startrev,
2727 startrev,
2717 stoprev,
2728 stoprev,
2718 c + 1,
2729 c + 1,
2719 lazydeltabase=lazydeltabase,
2730 lazydeltabase=lazydeltabase,
2720 clearcaches=clearcaches,
2731 clearcaches=clearcaches,
2721 )
2732 )
2722 allresults.append(timing)
2733 allresults.append(timing)
2723
2734
2724 ### consolidate the results in a single list
2735 ### consolidate the results in a single list
2725 results = []
2736 results = []
2726 for idx, (rev, t) in enumerate(allresults[0]):
2737 for idx, (rev, t) in enumerate(allresults[0]):
2727 ts = [t]
2738 ts = [t]
2728 for other in allresults[1:]:
2739 for other in allresults[1:]:
2729 orev, ot = other[idx]
2740 orev, ot = other[idx]
2730 assert orev == rev
2741 assert orev == rev
2731 ts.append(ot)
2742 ts.append(ot)
2732 results.append((rev, ts))
2743 results.append((rev, ts))
2733 resultcount = len(results)
2744 resultcount = len(results)
2734
2745
2735 ### Compute and display relevant statistics
2746 ### Compute and display relevant statistics
2736
2747
2737 # get a formatter
2748 # get a formatter
2738 fm = ui.formatter(b'perf', opts)
2749 fm = ui.formatter(b'perf', opts)
2739 displayall = ui.configbool(b"perf", b"all-timing", False)
2750 displayall = ui.configbool(b"perf", b"all-timing", False)
2740
2751
2741 # print individual details if requested
2752 # print individual details if requested
2742 if opts['details']:
2753 if opts['details']:
2743 for idx, item in enumerate(results, 1):
2754 for idx, item in enumerate(results, 1):
2744 rev, data = item
2755 rev, data = item
2745 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2756 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2746 formatone(fm, data, title=title, displayall=displayall)
2757 formatone(fm, data, title=title, displayall=displayall)
2747
2758
2748 # sorts results by median time
2759 # sorts results by median time
2749 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2760 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2750 # list of (name, index) to display)
2761 # list of (name, index) to display)
2751 relevants = [
2762 relevants = [
2752 ("min", 0),
2763 ("min", 0),
2753 ("10%", resultcount * 10 // 100),
2764 ("10%", resultcount * 10 // 100),
2754 ("25%", resultcount * 25 // 100),
2765 ("25%", resultcount * 25 // 100),
2755 ("50%", resultcount * 70 // 100),
2766 ("50%", resultcount * 70 // 100),
2756 ("75%", resultcount * 75 // 100),
2767 ("75%", resultcount * 75 // 100),
2757 ("90%", resultcount * 90 // 100),
2768 ("90%", resultcount * 90 // 100),
2758 ("95%", resultcount * 95 // 100),
2769 ("95%", resultcount * 95 // 100),
2759 ("99%", resultcount * 99 // 100),
2770 ("99%", resultcount * 99 // 100),
2760 ("99.9%", resultcount * 999 // 1000),
2771 ("99.9%", resultcount * 999 // 1000),
2761 ("99.99%", resultcount * 9999 // 10000),
2772 ("99.99%", resultcount * 9999 // 10000),
2762 ("99.999%", resultcount * 99999 // 100000),
2773 ("99.999%", resultcount * 99999 // 100000),
2763 ("max", -1),
2774 ("max", -1),
2764 ]
2775 ]
2765 if not ui.quiet:
2776 if not ui.quiet:
2766 for name, idx in relevants:
2777 for name, idx in relevants:
2767 data = results[idx]
2778 data = results[idx]
2768 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2779 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2769 formatone(fm, data[1], title=title, displayall=displayall)
2780 formatone(fm, data[1], title=title, displayall=displayall)
2770
2781
2771 # XXX summing that many float will not be very precise, we ignore this fact
2782 # XXX summing that many float will not be very precise, we ignore this fact
2772 # for now
2783 # for now
2773 totaltime = []
2784 totaltime = []
2774 for item in allresults:
2785 for item in allresults:
2775 totaltime.append(
2786 totaltime.append(
2776 (
2787 (
2777 sum(x[1][0] for x in item),
2788 sum(x[1][0] for x in item),
2778 sum(x[1][1] for x in item),
2789 sum(x[1][1] for x in item),
2779 sum(x[1][2] for x in item),
2790 sum(x[1][2] for x in item),
2780 )
2791 )
2781 )
2792 )
2782 formatone(
2793 formatone(
2783 fm,
2794 fm,
2784 totaltime,
2795 totaltime,
2785 title="total time (%d revs)" % resultcount,
2796 title="total time (%d revs)" % resultcount,
2786 displayall=displayall,
2797 displayall=displayall,
2787 )
2798 )
2788 fm.end()
2799 fm.end()
2789
2800
2790
2801
2791 class _faketr(object):
2802 class _faketr(object):
2792 def add(s, x, y, z=None):
2803 def add(s, x, y, z=None):
2793 return None
2804 return None
2794
2805
2795
2806
2796 def _timeonewrite(
2807 def _timeonewrite(
2797 ui,
2808 ui,
2798 orig,
2809 orig,
2799 source,
2810 source,
2800 startrev,
2811 startrev,
2801 stoprev,
2812 stoprev,
2802 runidx=None,
2813 runidx=None,
2803 lazydeltabase=True,
2814 lazydeltabase=True,
2804 clearcaches=True,
2815 clearcaches=True,
2805 ):
2816 ):
2806 timings = []
2817 timings = []
2807 tr = _faketr()
2818 tr = _faketr()
2808 with _temprevlog(ui, orig, startrev) as dest:
2819 with _temprevlog(ui, orig, startrev) as dest:
2809 dest._lazydeltabase = lazydeltabase
2820 dest._lazydeltabase = lazydeltabase
2810 revs = list(orig.revs(startrev, stoprev))
2821 revs = list(orig.revs(startrev, stoprev))
2811 total = len(revs)
2822 total = len(revs)
2812 topic = 'adding'
2823 topic = 'adding'
2813 if runidx is not None:
2824 if runidx is not None:
2814 topic += ' (run #%d)' % runidx
2825 topic += ' (run #%d)' % runidx
2815 # Support both old and new progress API
2826 # Support both old and new progress API
2816 if util.safehasattr(ui, 'makeprogress'):
2827 if util.safehasattr(ui, 'makeprogress'):
2817 progress = ui.makeprogress(topic, unit='revs', total=total)
2828 progress = ui.makeprogress(topic, unit='revs', total=total)
2818
2829
2819 def updateprogress(pos):
2830 def updateprogress(pos):
2820 progress.update(pos)
2831 progress.update(pos)
2821
2832
2822 def completeprogress():
2833 def completeprogress():
2823 progress.complete()
2834 progress.complete()
2824
2835
2825 else:
2836 else:
2826
2837
2827 def updateprogress(pos):
2838 def updateprogress(pos):
2828 ui.progress(topic, pos, unit='revs', total=total)
2839 ui.progress(topic, pos, unit='revs', total=total)
2829
2840
2830 def completeprogress():
2841 def completeprogress():
2831 ui.progress(topic, None, unit='revs', total=total)
2842 ui.progress(topic, None, unit='revs', total=total)
2832
2843
2833 for idx, rev in enumerate(revs):
2844 for idx, rev in enumerate(revs):
2834 updateprogress(idx)
2845 updateprogress(idx)
2835 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2846 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2836 if clearcaches:
2847 if clearcaches:
2837 dest.index.clearcaches()
2848 dest.index.clearcaches()
2838 dest.clearcaches()
2849 dest.clearcaches()
2839 with timeone() as r:
2850 with timeone() as r:
2840 dest.addrawrevision(*addargs, **addkwargs)
2851 dest.addrawrevision(*addargs, **addkwargs)
2841 timings.append((rev, r[0]))
2852 timings.append((rev, r[0]))
2842 updateprogress(total)
2853 updateprogress(total)
2843 completeprogress()
2854 completeprogress()
2844 return timings
2855 return timings
2845
2856
2846
2857
def _getrevisionseed(orig, rev, tr, source):
    """Build the arguments needed to re-add revision `rev` of revlog `orig`.

    Returns an ``(args, kwargs)`` pair suitable for ``addrawrevision``,
    within transaction `tr`.  `source` selects how the revision content is
    supplied: as a fulltext (``full``) or as a cached delta against one of
    several possible bases (``parent-1``, ``parent-2``,
    ``parent-smallest``, ``storage``).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    p1, p2 = orig.parents(node)

    text = None
    cachedelta = None

    if source == b'full':
        # provide the fulltext and let the revlog pick its own delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        # delta against the first parent
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # delta against the second parent, falling back to the first
        # parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # delta against whichever parent yields the smaller diff
        base = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(otherdiff) < len(diff):
                base = p2
                diff = otherdiff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse the delta exactly as currently stored on disk
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2887
2898
2888
2899
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a scratch copy of revlog `orig`, truncated before `truncaterev`.

    The index and data files are copied into a throwaway directory and cut
    down so that revision `truncaterev` and all later ones are absent,
    ready to be re-appended by a benchmark.  The directory is removed on
    exit regardless of success.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    srcindexpath = orig.opener.join(orig.indexfile)
    srcdatapath = orig.opener.join(orig.datafile)

    scratchdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # duplicate the on-disk files into the scratch directory
        ui.debug('copying data in %s\n' % scratchdir)
        tmpindexpath = os.path.join(scratchdir, 'revlog.i')
        tmpdatapath = os.path.join(scratchdir, 'revlog.d')
        shutil.copyfile(srcindexpath, tmpindexpath)
        shutil.copyfile(srcdatapath, tmpdatapath)

        # chop off the revisions that the benchmark will re-add
        ui.debug('truncating data to be rewritten\n')
        with open(tmpindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(tmpdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # build a fresh revlog on top of the truncated copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(scratchdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile='revlog.i', datafile='revlog.d', **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # touch the last revision so internal state is initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(scratchdir, True)
2939
2950
2940
2951
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # Default to every available engine able to compress revlog data.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a file handle on the file backing the revlog payload.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # One segment read per revision.
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread, but reusing a single file handle.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # Read the whole revision span in one call.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # Batch read with a reused file handle.
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Obtain the decompressed chunk of every revision individually.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # Recompress the chunks saved by dochunkbatch with `compressor`.
        rl.clearcaches()

        # Swap in the requested compression engine.  Save the original
        # compressor *before* entering the try block: previously the save
        # happened inside the try, so a failure there would have made the
        # finally clause reference an unbound name instead of restoring
        # the revlog state.
        oldcompressor = rl._compressor
        rl._compressor = compressor
        try:
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3068
3079
3069
3080
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m there is no FILE argument, so the first positional
    # argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Carve the (possibly compressed) chunk of every revision in
        # `chain` out of the segment data read for each slice.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # step 1: compute the delta chain
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # step 3 (I/O part): read the raw segments of the sliced chain
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # step 2: slice the delta chain (sparse-read revlogs only)
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        # step 3: extract the per-revision raw chunks from the segments
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # step 4: decompress each raw chunk
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # step 5: apply the binary deltas to rebuild the fulltext
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # step 6: verify the hash of the fulltext
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all steps combined, through the public revision() API
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # Pre-compute the intermediate result that each later phase needs as
    # input, so every benchmark measures only its own phase.
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3211
3222
3212
3223
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on the revset execution. Volatile
    cache holds filtered and obsolete related cache."""
    # NOTE: the docstring previously referred to a nonexistent --clean
    # option; the declared flag is -C/--clear.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # resolve a full changectx for each matched revision
            for ctx in repo.set(expr):
                pass
        else:
            # only iterate the revision numbers
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3244
3255
3245
3256
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mkobsbench(name):
        # build a benchmark closure for one obsolescence-related set
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return run

    wantedobs = sorted(obsolete.cachefuncs)
    if names:
        wantedobs = [n for n in wantedobs if n in names]

    for name in wantedobs:
        timer(mkobsbench(name), title=name)

    def mkfilterbench(name):
        # build a benchmark closure for one repoview filter
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return run

    wantedfilters = sorted(repoview.filtertable)
    if names:
        wantedfilters = [n for n in wantedfilters if n in names]

    for name in wantedfilters:
        timer(mkfilterbench(name), title=name)
    fm.end()
3291
3302
3292
3303
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop all cached branchmaps so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # only drop the branchmap of the benchmarked filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so filters
        # end up ordered smallest-subset first
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reading and writing so only the in-memory
    # computation is measured; both patches are undone in the finally
    # clause below.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # restore the original read/write methods
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3382
3393
3383
3394
3384 @command(
3395 @command(
3385 b'perfbranchmapupdate',
3396 b'perfbranchmapupdate',
3386 [
3397 [
3387 (b'', b'base', [], b'subset of revision to start from'),
3398 (b'', b'base', [], b'subset of revision to start from'),
3388 (b'', b'target', [], b'subset of revision to end with'),
3399 (b'', b'target', [], b'subset of revision to end with'),
3389 (b'', b'clear-caches', False, b'clear cache between each runs'),
3400 (b'', b'clear-caches', False, b'clear cache between each runs'),
3390 ]
3401 ]
3391 + formatteropts,
3402 + formatteropts,
3392 )
3403 )
3393 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3404 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3394 """benchmark branchmap update from for <base> revs to <target> revs
3405 """benchmark branchmap update from for <base> revs to <target> revs
3395
3406
3396 If `--clear-caches` is passed, the following items will be reset before
3407 If `--clear-caches` is passed, the following items will be reset before
3397 each update:
3408 each update:
3398 * the changelog instance and associated indexes
3409 * the changelog instance and associated indexes
3399 * the rev-branch-cache instance
3410 * the rev-branch-cache instance
3400
3411
3401 Examples:
3412 Examples:
3402
3413
3403 # update for the one last revision
3414 # update for the one last revision
3404 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3415 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3405
3416
3406 $ update for change coming with a new branch
3417 $ update for change coming with a new branch
3407 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3418 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3408 """
3419 """
3409 from mercurial import branchmap
3420 from mercurial import branchmap
3410 from mercurial import repoview
3421 from mercurial import repoview
3411
3422
3412 opts = _byteskwargs(opts)
3423 opts = _byteskwargs(opts)
3413 timer, fm = gettimer(ui, opts)
3424 timer, fm = gettimer(ui, opts)
3414 clearcaches = opts[b'clear_caches']
3425 clearcaches = opts[b'clear_caches']
3415 unfi = repo.unfiltered()
3426 unfi = repo.unfiltered()
3416 x = [None] # used to pass data between closure
3427 x = [None] # used to pass data between closure
3417
3428
3418 # we use a `list` here to avoid possible side effect from smartset
3429 # we use a `list` here to avoid possible side effect from smartset
3419 baserevs = list(scmutil.revrange(repo, base))
3430 baserevs = list(scmutil.revrange(repo, base))
3420 targetrevs = list(scmutil.revrange(repo, target))
3431 targetrevs = list(scmutil.revrange(repo, target))
3421 if not baserevs:
3432 if not baserevs:
3422 raise error.Abort(b'no revisions selected for --base')
3433 raise error.Abort(b'no revisions selected for --base')
3423 if not targetrevs:
3434 if not targetrevs:
3424 raise error.Abort(b'no revisions selected for --target')
3435 raise error.Abort(b'no revisions selected for --target')
3425
3436
3426 # make sure the target branchmap also contains the one in the base
3437 # make sure the target branchmap also contains the one in the base
3427 targetrevs = list(set(baserevs) | set(targetrevs))
3438 targetrevs = list(set(baserevs) | set(targetrevs))
3428 targetrevs.sort()
3439 targetrevs.sort()
3429
3440
3430 cl = repo.changelog
3441 cl = repo.changelog
3431 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3442 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3432 allbaserevs.sort()
3443 allbaserevs.sort()
3433 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3444 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3434
3445
3435 newrevs = list(alltargetrevs.difference(allbaserevs))
3446 newrevs = list(alltargetrevs.difference(allbaserevs))
3436 newrevs.sort()
3447 newrevs.sort()
3437
3448
3438 allrevs = frozenset(unfi.changelog.revs())
3449 allrevs = frozenset(unfi.changelog.revs())
3439 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3450 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3440 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3451 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3441
3452
3442 def basefilter(repo, visibilityexceptions=None):
3453 def basefilter(repo, visibilityexceptions=None):
3443 return basefilterrevs
3454 return basefilterrevs
3444
3455
3445 def targetfilter(repo, visibilityexceptions=None):
3456 def targetfilter(repo, visibilityexceptions=None):
3446 return targetfilterrevs
3457 return targetfilterrevs
3447
3458
3448 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3459 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3449 ui.status(msg % (len(allbaserevs), len(newrevs)))
3460 ui.status(msg % (len(allbaserevs), len(newrevs)))
3450 if targetfilterrevs:
3461 if targetfilterrevs:
3451 msg = b'(%d revisions still filtered)\n'
3462 msg = b'(%d revisions still filtered)\n'
3452 ui.status(msg % len(targetfilterrevs))
3463 ui.status(msg % len(targetfilterrevs))
3453
3464
3454 try:
3465 try:
3455 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3466 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3456 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3467 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3457
3468
3458 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3469 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3459 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3470 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3460
3471
3461 # try to find an existing branchmap to reuse
3472 # try to find an existing branchmap to reuse
3462 subsettable = getbranchmapsubsettable()
3473 subsettable = getbranchmapsubsettable()
3463 candidatefilter = subsettable.get(None)
3474 candidatefilter = subsettable.get(None)
3464 while candidatefilter is not None:
3475 while candidatefilter is not None:
3465 candidatebm = repo.filtered(candidatefilter).branchmap()
3476 candidatebm = repo.filtered(candidatefilter).branchmap()
3466 if candidatebm.validfor(baserepo):
3477 if candidatebm.validfor(baserepo):
3467 filtered = repoview.filterrevs(repo, candidatefilter)
3478 filtered = repoview.filterrevs(repo, candidatefilter)
3468 missing = [r for r in allbaserevs if r in filtered]
3479 missing = [r for r in allbaserevs if r in filtered]
3469 base = candidatebm.copy()
3480 base = candidatebm.copy()
3470 base.update(baserepo, missing)
3481 base.update(baserepo, missing)
3471 break
3482 break
3472 candidatefilter = subsettable.get(candidatefilter)
3483 candidatefilter = subsettable.get(candidatefilter)
3473 else:
3484 else:
3474 # no suitable subset where found
3485 # no suitable subset where found
3475 base = branchmap.branchcache()
3486 base = branchmap.branchcache()
3476 base.update(baserepo, allbaserevs)
3487 base.update(baserepo, allbaserevs)
3477
3488
3478 def setup():
3489 def setup():
3479 x[0] = base.copy()
3490 x[0] = base.copy()
3480 if clearcaches:
3491 if clearcaches:
3481 unfi._revbranchcache = None
3492 unfi._revbranchcache = None
3482 clearchangelog(repo)
3493 clearchangelog(repo)
3483
3494
3484 def bench():
3495 def bench():
3485 x[0].update(targetrepo, newrevs)
3496 x[0].update(targetrepo, newrevs)
3486
3497
3487 timer(bench, setup=setup)
3498 timer(bench, setup=setup)
3488 fm.end()
3499 fm.end()
3489 finally:
3500 finally:
3490 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3501 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3491 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3502 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3492
3503
3493
3504
3494 @command(
3505 @command(
3495 b'perfbranchmapload',
3506 b'perfbranchmapload',
3496 [
3507 [
3497 (b'f', b'filter', b'', b'Specify repoview filter'),
3508 (b'f', b'filter', b'', b'Specify repoview filter'),
3498 (b'', b'list', False, b'List brachmap filter caches'),
3509 (b'', b'list', False, b'List brachmap filter caches'),
3499 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3510 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3500 ]
3511 ]
3501 + formatteropts,
3512 + formatteropts,
3502 )
3513 )
3503 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3514 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3504 """benchmark reading the branchmap"""
3515 """benchmark reading the branchmap"""
3505 opts = _byteskwargs(opts)
3516 opts = _byteskwargs(opts)
3506 clearrevlogs = opts[b'clear_revlogs']
3517 clearrevlogs = opts[b'clear_revlogs']
3507
3518
3508 if list:
3519 if list:
3509 for name, kind, st in repo.cachevfs.readdir(stat=True):
3520 for name, kind, st in repo.cachevfs.readdir(stat=True):
3510 if name.startswith(b'branch2'):
3521 if name.startswith(b'branch2'):
3511 filtername = name.partition(b'-')[2] or b'unfiltered'
3522 filtername = name.partition(b'-')[2] or b'unfiltered'
3512 ui.status(
3523 ui.status(
3513 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3524 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3514 )
3525 )
3515 return
3526 return
3516 if not filter:
3527 if not filter:
3517 filter = None
3528 filter = None
3518 subsettable = getbranchmapsubsettable()
3529 subsettable = getbranchmapsubsettable()
3519 if filter is None:
3530 if filter is None:
3520 repo = repo.unfiltered()
3531 repo = repo.unfiltered()
3521 else:
3532 else:
3522 repo = repoview.repoview(repo, filter)
3533 repo = repoview.repoview(repo, filter)
3523
3534
3524 repo.branchmap() # make sure we have a relevant, up to date branchmap
3535 repo.branchmap() # make sure we have a relevant, up to date branchmap
3525
3536
3526 try:
3537 try:
3527 fromfile = branchmap.branchcache.fromfile
3538 fromfile = branchmap.branchcache.fromfile
3528 except AttributeError:
3539 except AttributeError:
3529 # older versions
3540 # older versions
3530 fromfile = branchmap.read
3541 fromfile = branchmap.read
3531
3542
3532 currentfilter = filter
3543 currentfilter = filter
3533 # try once without timer, the filter may not be cached
3544 # try once without timer, the filter may not be cached
3534 while fromfile(repo) is None:
3545 while fromfile(repo) is None:
3535 currentfilter = subsettable.get(currentfilter)
3546 currentfilter = subsettable.get(currentfilter)
3536 if currentfilter is None:
3547 if currentfilter is None:
3537 raise error.Abort(
3548 raise error.Abort(
3538 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3549 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3539 )
3550 )
3540 repo = repo.filtered(currentfilter)
3551 repo = repo.filtered(currentfilter)
3541 timer, fm = gettimer(ui, opts)
3552 timer, fm = gettimer(ui, opts)
3542
3553
3543 def setup():
3554 def setup():
3544 if clearrevlogs:
3555 if clearrevlogs:
3545 clearchangelog(repo)
3556 clearchangelog(repo)
3546
3557
3547 def bench():
3558 def bench():
3548 fromfile(repo)
3559 fromfile(repo)
3549
3560
3550 timer(bench, setup=setup)
3561 timer(bench, setup=setup)
3551 fm.end()
3562 fm.end()
3552
3563
3553
3564
3554 @command(b'perfloadmarkers')
3565 @command(b'perfloadmarkers')
3555 def perfloadmarkers(ui, repo):
3566 def perfloadmarkers(ui, repo):
3556 """benchmark the time to parse the on-disk markers for a repo
3567 """benchmark the time to parse the on-disk markers for a repo
3557
3568
3558 Result is the number of markers in the repo."""
3569 Result is the number of markers in the repo."""
3559 timer, fm = gettimer(ui)
3570 timer, fm = gettimer(ui)
3560 svfs = getsvfs(repo)
3571 svfs = getsvfs(repo)
3561 timer(lambda: len(obsolete.obsstore(svfs)))
3572 timer(lambda: len(obsolete.obsstore(svfs)))
3562 fm.end()
3573 fm.end()
3563
3574
3564
3575
3565 @command(
3576 @command(
3566 b'perflrucachedict',
3577 b'perflrucachedict',
3567 formatteropts
3578 formatteropts
3568 + [
3579 + [
3569 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3580 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3570 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3581 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3571 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3582 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3572 (b'', b'size', 4, b'size of cache'),
3583 (b'', b'size', 4, b'size of cache'),
3573 (b'', b'gets', 10000, b'number of key lookups'),
3584 (b'', b'gets', 10000, b'number of key lookups'),
3574 (b'', b'sets', 10000, b'number of key sets'),
3585 (b'', b'sets', 10000, b'number of key sets'),
3575 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3586 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3576 (
3587 (
3577 b'',
3588 b'',
3578 b'mixedgetfreq',
3589 b'mixedgetfreq',
3579 50,
3590 50,
3580 b'frequency of get vs set ops in mixed mode',
3591 b'frequency of get vs set ops in mixed mode',
3581 ),
3592 ),
3582 ],
3593 ],
3583 norepo=True,
3594 norepo=True,
3584 )
3595 )
3585 def perflrucache(
3596 def perflrucache(
3586 ui,
3597 ui,
3587 mincost=0,
3598 mincost=0,
3588 maxcost=100,
3599 maxcost=100,
3589 costlimit=0,
3600 costlimit=0,
3590 size=4,
3601 size=4,
3591 gets=10000,
3602 gets=10000,
3592 sets=10000,
3603 sets=10000,
3593 mixed=10000,
3604 mixed=10000,
3594 mixedgetfreq=50,
3605 mixedgetfreq=50,
3595 **opts
3606 **opts
3596 ):
3607 ):
3597 opts = _byteskwargs(opts)
3608 opts = _byteskwargs(opts)
3598
3609
3599 def doinit():
3610 def doinit():
3600 for i in _xrange(10000):
3611 for i in _xrange(10000):
3601 util.lrucachedict(size)
3612 util.lrucachedict(size)
3602
3613
3603 costrange = list(range(mincost, maxcost + 1))
3614 costrange = list(range(mincost, maxcost + 1))
3604
3615
3605 values = []
3616 values = []
3606 for i in _xrange(size):
3617 for i in _xrange(size):
3607 values.append(random.randint(0, _maxint))
3618 values.append(random.randint(0, _maxint))
3608
3619
3609 # Get mode fills the cache and tests raw lookup performance with no
3620 # Get mode fills the cache and tests raw lookup performance with no
3610 # eviction.
3621 # eviction.
3611 getseq = []
3622 getseq = []
3612 for i in _xrange(gets):
3623 for i in _xrange(gets):
3613 getseq.append(random.choice(values))
3624 getseq.append(random.choice(values))
3614
3625
3615 def dogets():
3626 def dogets():
3616 d = util.lrucachedict(size)
3627 d = util.lrucachedict(size)
3617 for v in values:
3628 for v in values:
3618 d[v] = v
3629 d[v] = v
3619 for key in getseq:
3630 for key in getseq:
3620 value = d[key]
3631 value = d[key]
3621 value # silence pyflakes warning
3632 value # silence pyflakes warning
3622
3633
3623 def dogetscost():
3634 def dogetscost():
3624 d = util.lrucachedict(size, maxcost=costlimit)
3635 d = util.lrucachedict(size, maxcost=costlimit)
3625 for i, v in enumerate(values):
3636 for i, v in enumerate(values):
3626 d.insert(v, v, cost=costs[i])
3637 d.insert(v, v, cost=costs[i])
3627 for key in getseq:
3638 for key in getseq:
3628 try:
3639 try:
3629 value = d[key]
3640 value = d[key]
3630 value # silence pyflakes warning
3641 value # silence pyflakes warning
3631 except KeyError:
3642 except KeyError:
3632 pass
3643 pass
3633
3644
3634 # Set mode tests insertion speed with cache eviction.
3645 # Set mode tests insertion speed with cache eviction.
3635 setseq = []
3646 setseq = []
3636 costs = []
3647 costs = []
3637 for i in _xrange(sets):
3648 for i in _xrange(sets):
3638 setseq.append(random.randint(0, _maxint))
3649 setseq.append(random.randint(0, _maxint))
3639 costs.append(random.choice(costrange))
3650 costs.append(random.choice(costrange))
3640
3651
3641 def doinserts():
3652 def doinserts():
3642 d = util.lrucachedict(size)
3653 d = util.lrucachedict(size)
3643 for v in setseq:
3654 for v in setseq:
3644 d.insert(v, v)
3655 d.insert(v, v)
3645
3656
3646 def doinsertscost():
3657 def doinsertscost():
3647 d = util.lrucachedict(size, maxcost=costlimit)
3658 d = util.lrucachedict(size, maxcost=costlimit)
3648 for i, v in enumerate(setseq):
3659 for i, v in enumerate(setseq):
3649 d.insert(v, v, cost=costs[i])
3660 d.insert(v, v, cost=costs[i])
3650
3661
3651 def dosets():
3662 def dosets():
3652 d = util.lrucachedict(size)
3663 d = util.lrucachedict(size)
3653 for v in setseq:
3664 for v in setseq:
3654 d[v] = v
3665 d[v] = v
3655
3666
3656 # Mixed mode randomly performs gets and sets with eviction.
3667 # Mixed mode randomly performs gets and sets with eviction.
3657 mixedops = []
3668 mixedops = []
3658 for i in _xrange(mixed):
3669 for i in _xrange(mixed):
3659 r = random.randint(0, 100)
3670 r = random.randint(0, 100)
3660 if r < mixedgetfreq:
3671 if r < mixedgetfreq:
3661 op = 0
3672 op = 0
3662 else:
3673 else:
3663 op = 1
3674 op = 1
3664
3675
3665 mixedops.append(
3676 mixedops.append(
3666 (op, random.randint(0, size * 2), random.choice(costrange))
3677 (op, random.randint(0, size * 2), random.choice(costrange))
3667 )
3678 )
3668
3679
3669 def domixed():
3680 def domixed():
3670 d = util.lrucachedict(size)
3681 d = util.lrucachedict(size)
3671
3682
3672 for op, v, cost in mixedops:
3683 for op, v, cost in mixedops:
3673 if op == 0:
3684 if op == 0:
3674 try:
3685 try:
3675 d[v]
3686 d[v]
3676 except KeyError:
3687 except KeyError:
3677 pass
3688 pass
3678 else:
3689 else:
3679 d[v] = v
3690 d[v] = v
3680
3691
3681 def domixedcost():
3692 def domixedcost():
3682 d = util.lrucachedict(size, maxcost=costlimit)
3693 d = util.lrucachedict(size, maxcost=costlimit)
3683
3694
3684 for op, v, cost in mixedops:
3695 for op, v, cost in mixedops:
3685 if op == 0:
3696 if op == 0:
3686 try:
3697 try:
3687 d[v]
3698 d[v]
3688 except KeyError:
3699 except KeyError:
3689 pass
3700 pass
3690 else:
3701 else:
3691 d.insert(v, v, cost=cost)
3702 d.insert(v, v, cost=cost)
3692
3703
3693 benches = [
3704 benches = [
3694 (doinit, b'init'),
3705 (doinit, b'init'),
3695 ]
3706 ]
3696
3707
3697 if costlimit:
3708 if costlimit:
3698 benches.extend(
3709 benches.extend(
3699 [
3710 [
3700 (dogetscost, b'gets w/ cost limit'),
3711 (dogetscost, b'gets w/ cost limit'),
3701 (doinsertscost, b'inserts w/ cost limit'),
3712 (doinsertscost, b'inserts w/ cost limit'),
3702 (domixedcost, b'mixed w/ cost limit'),
3713 (domixedcost, b'mixed w/ cost limit'),
3703 ]
3714 ]
3704 )
3715 )
3705 else:
3716 else:
3706 benches.extend(
3717 benches.extend(
3707 [
3718 [
3708 (dogets, b'gets'),
3719 (dogets, b'gets'),
3709 (doinserts, b'inserts'),
3720 (doinserts, b'inserts'),
3710 (dosets, b'sets'),
3721 (dosets, b'sets'),
3711 (domixed, b'mixed'),
3722 (domixed, b'mixed'),
3712 ]
3723 ]
3713 )
3724 )
3714
3725
3715 for fn, title in benches:
3726 for fn, title in benches:
3716 timer, fm = gettimer(ui, opts)
3727 timer, fm = gettimer(ui, opts)
3717 timer(fn, title=title)
3728 timer(fn, title=title)
3718 fm.end()
3729 fm.end()
3719
3730
3720
3731
3721 @command(b'perfwrite', formatteropts)
3732 @command(b'perfwrite', formatteropts)
3722 def perfwrite(ui, repo, **opts):
3733 def perfwrite(ui, repo, **opts):
3723 """microbenchmark ui.write
3734 """microbenchmark ui.write
3724 """
3735 """
3725 opts = _byteskwargs(opts)
3736 opts = _byteskwargs(opts)
3726
3737
3727 timer, fm = gettimer(ui, opts)
3738 timer, fm = gettimer(ui, opts)
3728
3739
3729 def write():
3740 def write():
3730 for i in range(100000):
3741 for i in range(100000):
3731 ui.writenoi18n(b'Testing write performance\n')
3742 ui.writenoi18n(b'Testing write performance\n')
3732
3743
3733 timer(write)
3744 timer(write)
3734 fm.end()
3745 fm.end()
3735
3746
3736
3747
3737 def uisetup(ui):
3748 def uisetup(ui):
3738 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3749 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3739 commands, b'debugrevlogopts'
3750 commands, b'debugrevlogopts'
3740 ):
3751 ):
3741 # for "historical portability":
3752 # for "historical portability":
3742 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3753 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3743 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3754 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3744 # openrevlog() should cause failure, because it has been
3755 # openrevlog() should cause failure, because it has been
3745 # available since 3.5 (or 49c583ca48c4).
3756 # available since 3.5 (or 49c583ca48c4).
3746 def openrevlog(orig, repo, cmd, file_, opts):
3757 def openrevlog(orig, repo, cmd, file_, opts):
3747 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3758 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3748 raise error.Abort(
3759 raise error.Abort(
3749 b"This version doesn't support --dir option",
3760 b"This version doesn't support --dir option",
3750 hint=b"use 3.5 or later",
3761 hint=b"use 3.5 or later",
3751 )
3762 )
3752 return orig(repo, cmd, file_, opts)
3763 return orig(repo, cmd, file_, opts)
3753
3764
3754 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3765 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3755
3766
3756
3767
3757 @command(
3768 @command(
3758 b'perfprogress',
3769 b'perfprogress',
3759 formatteropts
3770 formatteropts
3760 + [
3771 + [
3761 (b'', b'topic', b'topic', b'topic for progress messages'),
3772 (b'', b'topic', b'topic', b'topic for progress messages'),
3762 (b'c', b'total', 1000000, b'total value we are progressing to'),
3773 (b'c', b'total', 1000000, b'total value we are progressing to'),
3763 ],
3774 ],
3764 norepo=True,
3775 norepo=True,
3765 )
3776 )
3766 def perfprogress(ui, topic=None, total=None, **opts):
3777 def perfprogress(ui, topic=None, total=None, **opts):
3767 """printing of progress bars"""
3778 """printing of progress bars"""
3768 opts = _byteskwargs(opts)
3779 opts = _byteskwargs(opts)
3769
3780
3770 timer, fm = gettimer(ui, opts)
3781 timer, fm = gettimer(ui, opts)
3771
3782
3772 def doprogress():
3783 def doprogress():
3773 with ui.makeprogress(topic, total=total) as progress:
3784 with ui.makeprogress(topic, total=total) as progress:
3774 for i in _xrange(total):
3785 for i in _xrange(total):
3775 progress.increment()
3786 progress.increment()
3776
3787
3777 timer(doprogress)
3788 timer(doprogress)
3778 fm.end()
3789 fm.end()
@@ -1,396 +1,397 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from for <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
110 perfdirstate benchmap the time of various distate operations
111 perfdirstatedirs
111 perfdirstatedirs
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 benchmap a 'dirstate._map.filefoldmap.get()' request
114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 perfdirstatewrite
115 perfdirstatewrite
116 benchmap the time it take to write a dirstate on disk
116 benchmap the time it take to write a dirstate on disk
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
175 perfstatus benchmark the performance of a single status call
175 perfstatus benchmark the performance of a single status call
176 perftags (no help text available)
176 perftags (no help text available)
177 perftemplating
177 perftemplating
178 test the rendering time of a given template
178 test the rendering time of a given template
179 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
180 perfvolatilesets
180 perfvolatilesets
181 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
182 perfwalk (no help text available)
182 perfwalk (no help text available)
183 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
184
184
185 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
186 $ hg perfaddremove
186 $ hg perfaddremove
187 $ hg perfancestors
187 $ hg perfancestors
188 $ hg perfancestorset 2
188 $ hg perfancestorset 2
189 $ hg perfannotate a
189 $ hg perfannotate a
190 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
191 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
192 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
193 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
194 $ hg perfbookmarks
194 $ hg perfbookmarks
195 $ hg perfbranchmap
195 $ hg perfbranchmap
196 $ hg perfbranchmapload
196 $ hg perfbranchmapload
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
199 $ hg perfcca
199 $ hg perfcca
200 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
201 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
202 $ hg perfchangeset 2
202 $ hg perfchangeset 2
203 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
204 $ hg perfdiffwd
204 $ hg perfdiffwd
205 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
206 $ hg perfdirs
206 $ hg perfdirs
207 $ hg perfdirstate
207 $ hg perfdirstate
208 $ hg perfdirstate --iteration
208 $ hg perfdirstatedirs
209 $ hg perfdirstatedirs
209 $ hg perfdirstatefoldmap
210 $ hg perfdirstatefoldmap
210 $ hg perfdirstatewrite
211 $ hg perfdirstatewrite
211 #if repofncache
212 #if repofncache
212 $ hg perffncacheencode
213 $ hg perffncacheencode
213 $ hg perffncacheload
214 $ hg perffncacheload
214 $ hg debugrebuildfncache
215 $ hg debugrebuildfncache
215 fncache already up to date
216 fncache already up to date
216 $ hg perffncachewrite
217 $ hg perffncachewrite
217 $ hg debugrebuildfncache
218 $ hg debugrebuildfncache
218 fncache already up to date
219 fncache already up to date
219 #endif
220 #endif
220 $ hg perfheads
221 $ hg perfheads
221 $ hg perfignore
222 $ hg perfignore
222 $ hg perfindex
223 $ hg perfindex
223 $ hg perflinelogedits -n 1
224 $ hg perflinelogedits -n 1
224 $ hg perfloadmarkers
225 $ hg perfloadmarkers
225 $ hg perflog
226 $ hg perflog
226 $ hg perflookup 2
227 $ hg perflookup 2
227 $ hg perflrucache
228 $ hg perflrucache
228 $ hg perfmanifest 2
229 $ hg perfmanifest 2
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 $ hg perfmanifest -m 44fe2c8352bb
231 $ hg perfmanifest -m 44fe2c8352bb
231 abort: manifest revision must be integer or full node
232 abort: manifest revision must be integer or full node
232 [255]
233 [255]
233 $ hg perfmergecalculate -r 3
234 $ hg perfmergecalculate -r 3
234 $ hg perfmoonwalk
235 $ hg perfmoonwalk
235 $ hg perfnodelookup 2
236 $ hg perfnodelookup 2
236 $ hg perfpathcopies 1 2
237 $ hg perfpathcopies 1 2
237 $ hg perfprogress --total 1000
238 $ hg perfprogress --total 1000
238 $ hg perfrawfiles 2
239 $ hg perfrawfiles 2
239 $ hg perfrevlogindex -c
240 $ hg perfrevlogindex -c
240 #if reporevlogstore
241 #if reporevlogstore
241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 $ hg perfrevlogrevisions .hg/store/data/a.i
242 #endif
243 #endif
243 $ hg perfrevlogrevision -m 0
244 $ hg perfrevlogrevision -m 0
244 $ hg perfrevlogchunks -c
245 $ hg perfrevlogchunks -c
245 $ hg perfrevrange
246 $ hg perfrevrange
246 $ hg perfrevset 'all()'
247 $ hg perfrevset 'all()'
247 $ hg perfstartup
248 $ hg perfstartup
248 $ hg perfstatus
249 $ hg perfstatus
249 $ hg perftags
250 $ hg perftags
250 $ hg perftemplating
251 $ hg perftemplating
251 $ hg perfvolatilesets
252 $ hg perfvolatilesets
252 $ hg perfwalk
253 $ hg perfwalk
253 $ hg perfparents
254 $ hg perfparents
254 $ hg perfdiscovery -q .
255 $ hg perfdiscovery -q .
255
256
256 Test run control
257 Test run control
257 ----------------
258 ----------------
258
259
259 Simple single entry
260 Simple single entry
260
261
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 ! wall * comb * user * sys * (best of 15) (glob)
263 ! wall * comb * user * sys * (best of 15) (glob)
263
264
264 Multiple entries
265 Multiple entries
265
266
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 ! wall * comb * user * sys * (best of 5) (glob)
268 ! wall * comb * user * sys * (best of 5) (glob)
268
269
269 error case are ignored
270 error case are ignored
270
271
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 malformatted run limit entry, missing "-": 500
273 malformatted run limit entry, missing "-": 500
273 ! wall * comb * user * sys * (best of 5) (glob)
274 ! wall * comb * user * sys * (best of 5) (glob)
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 ! wall * comb * user * sys * (best of 5) (glob)
278 ! wall * comb * user * sys * (best of 5) (glob)
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 ! wall * comb * user * sys * (best of 5) (glob)
281 ! wall * comb * user * sys * (best of 5) (glob)
281
282
282 test actual output
283 test actual output
283 ------------------
284 ------------------
284
285
285 normal output:
286 normal output:
286
287
287 $ hg perfheads --config perf.stub=no
288 $ hg perfheads --config perf.stub=no
288 ! wall * comb * user * sys * (best of *) (glob)
289 ! wall * comb * user * sys * (best of *) (glob)
289
290
290 detailed output:
291 detailed output:
291
292
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 ! wall * comb * user * sys * (best of *) (glob)
294 ! wall * comb * user * sys * (best of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
295 ! wall * comb * user * sys * (max of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
296 ! wall * comb * user * sys * (avg of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
297 ! wall * comb * user * sys * (median of *) (glob)
297
298
298 test json output
299 test json output
299 ----------------
300 ----------------
300
301
301 normal output:
302 normal output:
302
303
303 $ hg perfheads --template json --config perf.stub=no
304 $ hg perfheads --template json --config perf.stub=no
304 [
305 [
305 {
306 {
306 "comb": *, (glob)
307 "comb": *, (glob)
307 "count": *, (glob)
308 "count": *, (glob)
308 "sys": *, (glob)
309 "sys": *, (glob)
309 "user": *, (glob)
310 "user": *, (glob)
310 "wall": * (glob)
311 "wall": * (glob)
311 }
312 }
312 ]
313 ]
313
314
314 detailed output:
315 detailed output:
315
316
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 [
318 [
318 {
319 {
319 "avg.comb": *, (glob)
320 "avg.comb": *, (glob)
320 "avg.count": *, (glob)
321 "avg.count": *, (glob)
321 "avg.sys": *, (glob)
322 "avg.sys": *, (glob)
322 "avg.user": *, (glob)
323 "avg.user": *, (glob)
323 "avg.wall": *, (glob)
324 "avg.wall": *, (glob)
324 "comb": *, (glob)
325 "comb": *, (glob)
325 "count": *, (glob)
326 "count": *, (glob)
326 "max.comb": *, (glob)
327 "max.comb": *, (glob)
327 "max.count": *, (glob)
328 "max.count": *, (glob)
328 "max.sys": *, (glob)
329 "max.sys": *, (glob)
329 "max.user": *, (glob)
330 "max.user": *, (glob)
330 "max.wall": *, (glob)
331 "max.wall": *, (glob)
331 "median.comb": *, (glob)
332 "median.comb": *, (glob)
332 "median.count": *, (glob)
333 "median.count": *, (glob)
333 "median.sys": *, (glob)
334 "median.sys": *, (glob)
334 "median.user": *, (glob)
335 "median.user": *, (glob)
335 "median.wall": *, (glob)
336 "median.wall": *, (glob)
336 "sys": *, (glob)
337 "sys": *, (glob)
337 "user": *, (glob)
338 "user": *, (glob)
338 "wall": * (glob)
339 "wall": * (glob)
339 }
340 }
340 ]
341 ]
341
342
342 Test pre-run feature
343 Test pre-run feature
343 --------------------
344 --------------------
344
345
345 (perf discovery has some spurious output)
346 (perf discovery has some spurious output)
346
347
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 ! wall * comb * user * sys * (best of 1) (glob)
349 ! wall * comb * user * sys * (best of 1) (glob)
349 searching for changes
350 searching for changes
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 ! wall * comb * user * sys * (best of 1) (glob)
352 ! wall * comb * user * sys * (best of 1) (glob)
352 searching for changes
353 searching for changes
353 searching for changes
354 searching for changes
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 ! wall * comb * user * sys * (best of 1) (glob)
356 ! wall * comb * user * sys * (best of 1) (glob)
356 searching for changes
357 searching for changes
357 searching for changes
358 searching for changes
358 searching for changes
359 searching for changes
359 searching for changes
360 searching for changes
360
361
361 test profile-benchmark option
362 test profile-benchmark option
362 ------------------------------
363 ------------------------------
363
364
364 Function to check that statprof ran
365 Function to check that statprof ran
365 $ statprofran () {
366 $ statprofran () {
366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 > egrep 'Sample count:|No samples recorded' > /dev/null
367 > }
368 > }
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369
370
370 Check perf.py for historical portability
371 Check perf.py for historical portability
371 ----------------------------------------
372 ----------------------------------------
372
373
373 $ cd "$TESTDIR/.."
374 $ cd "$TESTDIR/.."
374
375
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 contrib/perf.py:\d+: (re)
379 contrib/perf.py:\d+: (re)
379 > from mercurial import (
380 > from mercurial import (
380 import newer module separately in try clause for early Mercurial
381 import newer module separately in try clause for early Mercurial
381 contrib/perf.py:\d+: (re)
382 contrib/perf.py:\d+: (re)
382 > from mercurial import (
383 > from mercurial import (
383 import newer module separately in try clause for early Mercurial
384 import newer module separately in try clause for early Mercurial
384 contrib/perf.py:\d+: (re)
385 contrib/perf.py:\d+: (re)
385 > origindexpath = orig.opener.join(orig.indexfile)
386 > origindexpath = orig.opener.join(orig.indexfile)
386 use getvfs()/getsvfs() for early Mercurial
387 use getvfs()/getsvfs() for early Mercurial
387 contrib/perf.py:\d+: (re)
388 contrib/perf.py:\d+: (re)
388 > origdatapath = orig.opener.join(orig.datafile)
389 > origdatapath = orig.opener.join(orig.datafile)
389 use getvfs()/getsvfs() for early Mercurial
390 use getvfs()/getsvfs() for early Mercurial
390 contrib/perf.py:\d+: (re)
391 contrib/perf.py:\d+: (re)
391 > vfs = vfsmod.vfs(tmpdir)
392 > vfs = vfsmod.vfs(tmpdir)
392 use getvfs()/getsvfs() for early Mercurial
393 use getvfs()/getsvfs() for early Mercurial
393 contrib/perf.py:\d+: (re)
394 contrib/perf.py:\d+: (re)
394 > vfs.options = getattr(orig.opener, 'options', None)
395 > vfs.options = getattr(orig.opener, 'options', None)
395 use getvfs()/getsvfs() for early Mercurial
396 use getvfs()/getsvfs() for early Mercurial
396 [1]
397 [1]
General Comments 0
You need to be logged in to leave comments. Login now