##// END OF EJS Templates
perf: document `perfdirfoldmap`
marmoute -
r43397:0b32206c default
parent child Browse files
Show More
@@ -1,3768 +1,3772
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged (used as a no-op compatibility shim)."""
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (bytes name).

    Implemented with a sentinel default so a present-but-falsy attribute
    still counts as existing.
    """
    found = getattr(thing, _sysstr(attr), _undefined)
    return found is not _undefined
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration like ``b"name|alias1|alias2"`` into names."""
    return cmd.split(b"|")
228
228
229
229
230 if safehasattr(registrar, 'command'):
230 if safehasattr(registrar, 'command'):
231 command = registrar.command(cmdtable)
231 command = registrar.command(cmdtable)
232 elif safehasattr(cmdutil, 'command'):
232 elif safehasattr(cmdutil, 'command'):
233 command = cmdutil.command(cmdtable)
233 command = cmdutil.command(cmdtable)
234 if b'norepo' not in getargspec(command).args:
234 if b'norepo' not in getargspec(command).args:
235 # for "historical portability":
235 # for "historical portability":
236 # wrap original cmdutil.command, because "norepo" option has
236 # wrap original cmdutil.command, because "norepo" option has
237 # been available since 3.1 (or 75a96326cecb)
237 # been available since 3.1 (or 75a96326cecb)
238 _command = command
238 _command = command
239
239
240 def command(name, options=(), synopsis=None, norepo=False):
240 def command(name, options=(), synopsis=None, norepo=False):
241 if norepo:
241 if norepo:
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 return _command(name, list(options), synopsis)
243 return _command(name, list(options), synopsis)
244
244
245
245
246 else:
246 else:
247 # for "historical portability":
247 # for "historical portability":
248 # define "@command" annotation locally, because cmdutil.command
248 # define "@command" annotation locally, because cmdutil.command
249 # has been available since 1.9 (or 2daa5179e73f)
249 # has been available since 1.9 (or 2daa5179e73f)
250 def command(name, options=(), synopsis=None, norepo=False):
250 def command(name, options=(), synopsis=None, norepo=False):
251 def decorator(func):
251 def decorator(func):
252 if synopsis:
252 if synopsis:
253 cmdtable[name] = func, list(options), synopsis
253 cmdtable[name] = func, list(options), synopsis
254 else:
254 else:
255 cmdtable[name] = func, list(options)
255 cmdtable[name] = func, list(options)
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return func
258 return func
259
259
260 return decorator
260 return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'profile-benchmark',
298 b'profile-benchmark',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 )
300 )
301 configitem(
301 configitem(
302 b'perf',
302 b'perf',
303 b'run-limits',
303 b'run-limits',
304 default=mercurial.configitems.dynamicdefault,
304 default=mercurial.configitems.dynamicdefault,
305 experimental=True,
305 experimental=True,
306 )
306 )
307 except (ImportError, AttributeError):
307 except (ImportError, AttributeError):
308 pass
308 pass
309 except TypeError:
309 except TypeError:
310 # compatibility fix for a11fd395e83f
310 # compatibility fix for a11fd395e83f
311 # hg version: 5.2
311 # hg version: 5.2
312 configitem(
312 configitem(
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 )
314 )
315 configitem(
315 configitem(
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 )
320 )
321 configitem(
321 configitem(
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 )
326 )
327 configitem(
327 configitem(
328 b'perf',
328 b'perf',
329 b'profile-benchmark',
329 b'profile-benchmark',
330 default=mercurial.configitems.dynamicdefault,
330 default=mercurial.configitems.dynamicdefault,
331 )
331 )
332 configitem(
332 configitem(
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 )
334 )
335
335
336
336
def getlen(ui):
    """Return a length function.

    In stub mode (``perf.stub``) every collection is reported as having a
    single item so benchmarks finish instantly during testing.
    """
    return (lambda x: 1) if ui.configbool(b"perf", b"stub", False) else len
341
341
342
342
class noop(object):
    """Context manager that does nothing on entry or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # Returning None (falsy) lets any exception propagate.
        return None


# shared do-nothing context, used when profiling is disabled
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        fout_setter = safeattrsetter(ui, b'fout', ignoremissing=True)
        if fout_setter:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            fout_setter.set(ui.ferr)

    # get a formatter
    formatter_factory = getattr(ui, 'formatter', None)
    if formatter_factory:
        fm = formatter_factory(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                self.hexfunc = node.hex if ui.debugflag else node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<minimum-run-count>"; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limits = []
    for entry in ui.configlist(b"perf", b"run-limits", []):
        pieces = entry.split(b'-', 1)
        if len(pieces) < 2:
            ui.warn(b'malformatted run limit entry, missing "-": %s\n' % entry)
            continue
        try:
            max_seconds = float(_sysstr(pieces[0]))
        except ValueError as exc:
            ui.warn(
                b'malformatted run limit entry, %s: %s\n'
                % (_bytestr(exc), entry)
            )
            continue
        try:
            min_runs = int(_sysstr(pieces[1]))
        except ValueError as exc:
            ui.warn(
                b'malformatted run limit entry, %s: %s\n'
                % (_bytestr(exc), entry)
            )
            continue
        limits.append((max_seconds, min_runs))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None and ui.configbool(
        b"perf", b"profile-benchmark", False
    ):
        profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Run ``func`` exactly once (after optional ``setup``); report nothing.

    Drop-in replacement for ``_timer`` used when ``perf.stub`` is set.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time the enclosed block once.

    Yields a list; on exit a single ``(wall, user, sys)`` tuple is appended
    to it, measured via ``util.timer`` and ``os.times``.
    """
    measurement = []
    os_before = os.times()
    clock_before = util.timer()
    yield measurement
    clock_after = util.timer()
    os_after = os.times()
    measurement.append(
        (
            clock_after - clock_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
497
497
498
498
# default stop conditions as (elapsed seconds, minimal run count) pairs;
# the first satisfied pair ends the benchmark loop
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run ``func`` and report timings through formatter ``fm``.

    ``setup`` (if given) runs before every iteration. ``limits`` is a list of
    ``(seconds, mincount)`` stop conditions. ``prerun`` warm-up iterations are
    executed unmeasured. ``profiler`` wraps only the first measured iteration.
    """
    gc.collect()
    samples = []
    begin = util.timer()
    iterations = 0
    if profiler is None:
        profiler = NOOPCTX
    # unmeasured warm-up runs
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    running = True
    while running:
        if setup is not None:
            setup()
        with profiler, timeone() as measured:
            ret = func()
        # only the first iteration is profiled
        profiler = NOOPCTX
        iterations += 1
        samples.append(measured[0])
        elapsed = util.timer() - begin
        # Look for a stop condition.
        for max_seconds, mincount in limits:
            if elapsed >= max_seconds and iterations >= mincount:
                running = False
                break

    formatone(fm, samples, title=title, result=ret, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter ``fm``.

    ``timings`` is a list of ``(wall, user, sys)`` tuples (sorted in place).
    Always reports the best sample; with ``displayall`` also reports the
    max, average and median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        # non-"best" roles get a "role." prefix on each field name
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        averaged = tuple(sum(column) / count for column in zip(*timings))
        emit(b'avg', averaged)
        emit(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, falling back to ``default``.

    Exists for "historical portability": ``ui.configint`` has only been
    available since 1.9 (or fa2b596db182). Raises ``error.ConfigError`` when
    the configured value is not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
599 def safeattrsetter(obj, name, ignoremissing=False):
599 def safeattrsetter(obj, name, ignoremissing=False):
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
601
601
602 This function is aborted, if 'obj' doesn't have 'name' attribute
602 This function is aborted, if 'obj' doesn't have 'name' attribute
603 at runtime. This avoids overlooking removal of an attribute, which
603 at runtime. This avoids overlooking removal of an attribute, which
604 breaks assumption of performance measurement, in the future.
604 breaks assumption of performance measurement, in the future.
605
605
606 This function returns the object to (1) assign a new value, and
606 This function returns the object to (1) assign a new value, and
607 (2) restore an original value to the attribute.
607 (2) restore an original value to the attribute.
608
608
609 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
609 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
610 abortion, and this function returns None. This is useful to
610 abortion, and this function returns None. This is useful to
611 examine an attribute, which isn't ensured in all Mercurial
611 examine an attribute, which isn't ensured in all Mercurial
612 versions.
612 versions.
613 """
613 """
614 if not util.safehasattr(obj, name):
614 if not util.safehasattr(obj, name):
615 if ignoremissing:
615 if ignoremissing:
616 return None
616 return None
617 raise error.Abort(
617 raise error.Abort(
618 (
618 (
619 b"missing attribute %s of %s might break assumption"
619 b"missing attribute %s of %s might break assumption"
620 b" of performance measurement"
620 b" of performance measurement"
621 )
621 )
622 % (name, obj)
622 % (name, obj)
623 )
623 )
624
624
625 origvalue = getattr(obj, _sysstr(name))
625 origvalue = getattr(obj, _sysstr(name))
626
626
627 class attrutil(object):
627 class attrutil(object):
628 def set(self, newvalue):
628 def set(self, newvalue):
629 setattr(obj, _sysstr(name), newvalue)
629 setattr(obj, _sysstr(name), newvalue)
630
630
631 def restore(self):
631 def restore(self):
632 setattr(obj, _sysstr(name), origvalue)
632 setattr(obj, _sysstr(name), origvalue)
633
633
634 return attrutil()
634 return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
640 def getbranchmapsubsettable():
640 def getbranchmapsubsettable():
641 # for "historical portability":
641 # for "historical portability":
642 # subsettable is defined in:
642 # subsettable is defined in:
643 # - branchmap since 2.9 (or 175c6fd8cacc)
643 # - branchmap since 2.9 (or 175c6fd8cacc)
644 # - repoview since 2.5 (or 59a9f18d4587)
644 # - repoview since 2.5 (or 59a9f18d4587)
645 # - repoviewutil since 5.0
645 # - repoviewutil since 5.0
646 for mod in (branchmap, repoview, repoviewutil):
646 for mod in (branchmap, repoview, repoviewutil):
647 subsettable = getattr(mod, 'subsettable', None)
647 subsettable = getattr(mod, 'subsettable', None)
648 if subsettable:
648 if subsettable:
649 return subsettable
649 return subsettable
650
650
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
652 # branchmap and repoview modules exist, but subsettable attribute
652 # branchmap and repoview modules exist, but subsettable attribute
653 # doesn't)
653 # doesn't)
654 raise error.Abort(
654 raise error.Abort(
655 b"perfbranchmap not available with this Mercurial",
655 b"perfbranchmap not available with this Mercurial",
656 hint=b"use 2.5 or later",
656 hint=b"use 2.5 or later",
657 )
657 )
658
658
659
659
660 def getsvfs(repo):
660 def getsvfs(repo):
661 """Return appropriate object to access files under .hg/store
661 """Return appropriate object to access files under .hg/store
662 """
662 """
663 # for "historical portability":
663 # for "historical portability":
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
665 svfs = getattr(repo, 'svfs', None)
665 svfs = getattr(repo, 'svfs', None)
666 if svfs:
666 if svfs:
667 return svfs
667 return svfs
668 else:
668 else:
669 return getattr(repo, 'sopener')
669 return getattr(repo, 'sopener')
670
670
671
671
672 def getvfs(repo):
672 def getvfs(repo):
673 """Return appropriate object to access files under .hg
673 """Return appropriate object to access files under .hg
674 """
674 """
675 # for "historical portability":
675 # for "historical portability":
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
677 vfs = getattr(repo, 'vfs', None)
677 vfs = getattr(repo, 'vfs', None)
678 if vfs:
678 if vfs:
679 return vfs
679 return vfs
680 else:
680 else:
681 return getattr(repo, 'opener')
681 return getattr(repo, 'opener')
682
682
683
683
684 def repocleartagscachefunc(repo):
684 def repocleartagscachefunc(repo):
685 """Return the function to clear tags cache according to repo internal API
685 """Return the function to clear tags cache according to repo internal API
686 """
686 """
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
689 # correct way to clear tags cache, because existing code paths
689 # correct way to clear tags cache, because existing code paths
690 # expect _tagscache to be a structured object.
690 # expect _tagscache to be a structured object.
691 def clearcache():
691 def clearcache():
692 # _tagscache has been filteredpropertycache since 2.5 (or
692 # _tagscache has been filteredpropertycache since 2.5 (or
693 # 98c867ac1330), and delattr() can't work in such case
693 # 98c867ac1330), and delattr() can't work in such case
694 if b'_tagscache' in vars(repo):
694 if b'_tagscache' in vars(repo):
695 del repo.__dict__[b'_tagscache']
695 del repo.__dict__[b'_tagscache']
696
696
697 return clearcache
697 return clearcache
698
698
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
700 if repotags: # since 1.4 (or 5614a628d173)
700 if repotags: # since 1.4 (or 5614a628d173)
701 return lambda: repotags.set(None)
701 return lambda: repotags.set(None)
702
702
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
705 return lambda: repotagscache.set(None)
705 return lambda: repotagscache.set(None)
706
706
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
708 # this point, but it isn't so problematic, because:
708 # this point, but it isn't so problematic, because:
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
710 # in perftags() causes failure soon
710 # in perftags() causes failure soon
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
712 raise error.Abort(b"tags API of this hg command is unknown")
712 raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
718 def clearfilecache(obj, attrname):
718 def clearfilecache(obj, attrname):
719 unfiltered = getattr(obj, 'unfiltered', None)
719 unfiltered = getattr(obj, 'unfiltered', None)
720 if unfiltered is not None:
720 if unfiltered is not None:
721 obj = obj.unfiltered()
721 obj = obj.unfiltered()
722 if attrname in vars(obj):
722 if attrname in vars(obj):
723 delattr(obj, attrname)
723 delattr(obj, attrname)
724 obj._filecache.pop(attrname, None)
724 obj._filecache.pop(attrname, None)
725
725
726
726
727 def clearchangelog(repo):
727 def clearchangelog(repo):
728 if repo is not repo.unfiltered():
728 if repo is not repo.unfiltered():
729 object.__setattr__(repo, r'_clcachekey', None)
729 object.__setattr__(repo, r'_clcachekey', None)
730 object.__setattr__(repo, r'_clcache', None)
730 object.__setattr__(repo, r'_clcache', None)
731 clearfilecache(repo.unfiltered(), 'changelog')
731 clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
737 @command(b'perfwalk', formatteropts)
737 @command(b'perfwalk', formatteropts)
738 def perfwalk(ui, repo, *pats, **opts):
738 def perfwalk(ui, repo, *pats, **opts):
739 opts = _byteskwargs(opts)
739 opts = _byteskwargs(opts)
740 timer, fm = gettimer(ui, opts)
740 timer, fm = gettimer(ui, opts)
741 m = scmutil.match(repo[None], pats, {})
741 m = scmutil.match(repo[None], pats, {})
742 timer(
742 timer(
743 lambda: len(
743 lambda: len(
744 list(
744 list(
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
746 )
746 )
747 )
747 )
748 )
748 )
749 fm.end()
749 fm.end()
750
750
751
751
752 @command(b'perfannotate', formatteropts)
752 @command(b'perfannotate', formatteropts)
753 def perfannotate(ui, repo, f, **opts):
753 def perfannotate(ui, repo, f, **opts):
754 opts = _byteskwargs(opts)
754 opts = _byteskwargs(opts)
755 timer, fm = gettimer(ui, opts)
755 timer, fm = gettimer(ui, opts)
756 fc = repo[b'.'][f]
756 fc = repo[b'.'][f]
757 timer(lambda: len(fc.annotate(True)))
757 timer(lambda: len(fc.annotate(True)))
758 fm.end()
758 fm.end()
759
759
760
760
761 @command(
761 @command(
762 b'perfstatus',
762 b'perfstatus',
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
764 + formatteropts,
764 + formatteropts,
765 )
765 )
766 def perfstatus(ui, repo, **opts):
766 def perfstatus(ui, repo, **opts):
767 """benchmark the performance of a single status call
767 """benchmark the performance of a single status call
768
768
769 The repository data are preserved between each call.
769 The repository data are preserved between each call.
770
770
771 By default, only the status of the tracked file are requested. If
771 By default, only the status of the tracked file are requested. If
772 `--unknown` is passed, the "unknown" files are also tracked.
772 `--unknown` is passed, the "unknown" files are also tracked.
773 """
773 """
774 opts = _byteskwargs(opts)
774 opts = _byteskwargs(opts)
775 # m = match.always(repo.root, repo.getcwd())
775 # m = match.always(repo.root, repo.getcwd())
776 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
776 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
777 # False))))
777 # False))))
778 timer, fm = gettimer(ui, opts)
778 timer, fm = gettimer(ui, opts)
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
780 fm.end()
780 fm.end()
781
781
782
782
783 @command(b'perfaddremove', formatteropts)
783 @command(b'perfaddremove', formatteropts)
784 def perfaddremove(ui, repo, **opts):
784 def perfaddremove(ui, repo, **opts):
785 opts = _byteskwargs(opts)
785 opts = _byteskwargs(opts)
786 timer, fm = gettimer(ui, opts)
786 timer, fm = gettimer(ui, opts)
787 try:
787 try:
788 oldquiet = repo.ui.quiet
788 oldquiet = repo.ui.quiet
789 repo.ui.quiet = True
789 repo.ui.quiet = True
790 matcher = scmutil.match(repo[None])
790 matcher = scmutil.match(repo[None])
791 opts[b'dry_run'] = True
791 opts[b'dry_run'] = True
792 if b'uipathfn' in getargspec(scmutil.addremove).args:
792 if b'uipathfn' in getargspec(scmutil.addremove).args:
793 uipathfn = scmutil.getuipathfn(repo)
793 uipathfn = scmutil.getuipathfn(repo)
794 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
794 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
795 else:
795 else:
796 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
796 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
797 finally:
797 finally:
798 repo.ui.quiet = oldquiet
798 repo.ui.quiet = oldquiet
799 fm.end()
799 fm.end()
800
800
801
801
802 def clearcaches(cl):
802 def clearcaches(cl):
803 # behave somewhat consistently across internal API changes
803 # behave somewhat consistently across internal API changes
804 if util.safehasattr(cl, b'clearcaches'):
804 if util.safehasattr(cl, b'clearcaches'):
805 cl.clearcaches()
805 cl.clearcaches()
806 elif util.safehasattr(cl, b'_nodecache'):
806 elif util.safehasattr(cl, b'_nodecache'):
807 from mercurial.node import nullid, nullrev
807 from mercurial.node import nullid, nullrev
808
808
809 cl._nodecache = {nullid: nullrev}
809 cl._nodecache = {nullid: nullrev}
810 cl._nodepos = None
810 cl._nodepos = None
811
811
812
812
813 @command(b'perfheads', formatteropts)
813 @command(b'perfheads', formatteropts)
814 def perfheads(ui, repo, **opts):
814 def perfheads(ui, repo, **opts):
815 """benchmark the computation of a changelog heads"""
815 """benchmark the computation of a changelog heads"""
816 opts = _byteskwargs(opts)
816 opts = _byteskwargs(opts)
817 timer, fm = gettimer(ui, opts)
817 timer, fm = gettimer(ui, opts)
818 cl = repo.changelog
818 cl = repo.changelog
819
819
820 def s():
820 def s():
821 clearcaches(cl)
821 clearcaches(cl)
822
822
823 def d():
823 def d():
824 len(cl.headrevs())
824 len(cl.headrevs())
825
825
826 timer(d, setup=s)
826 timer(d, setup=s)
827 fm.end()
827 fm.end()
828
828
829
829
830 @command(
830 @command(
831 b'perftags',
831 b'perftags',
832 formatteropts
832 formatteropts
833 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
833 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
834 )
834 )
835 def perftags(ui, repo, **opts):
835 def perftags(ui, repo, **opts):
836 opts = _byteskwargs(opts)
836 opts = _byteskwargs(opts)
837 timer, fm = gettimer(ui, opts)
837 timer, fm = gettimer(ui, opts)
838 repocleartagscache = repocleartagscachefunc(repo)
838 repocleartagscache = repocleartagscachefunc(repo)
839 clearrevlogs = opts[b'clear_revlogs']
839 clearrevlogs = opts[b'clear_revlogs']
840
840
841 def s():
841 def s():
842 if clearrevlogs:
842 if clearrevlogs:
843 clearchangelog(repo)
843 clearchangelog(repo)
844 clearfilecache(repo.unfiltered(), 'manifest')
844 clearfilecache(repo.unfiltered(), 'manifest')
845 repocleartagscache()
845 repocleartagscache()
846
846
847 def t():
847 def t():
848 return len(repo.tags())
848 return len(repo.tags())
849
849
850 timer(t, setup=s)
850 timer(t, setup=s)
851 fm.end()
851 fm.end()
852
852
853
853
854 @command(b'perfancestors', formatteropts)
854 @command(b'perfancestors', formatteropts)
855 def perfancestors(ui, repo, **opts):
855 def perfancestors(ui, repo, **opts):
856 opts = _byteskwargs(opts)
856 opts = _byteskwargs(opts)
857 timer, fm = gettimer(ui, opts)
857 timer, fm = gettimer(ui, opts)
858 heads = repo.changelog.headrevs()
858 heads = repo.changelog.headrevs()
859
859
860 def d():
860 def d():
861 for a in repo.changelog.ancestors(heads):
861 for a in repo.changelog.ancestors(heads):
862 pass
862 pass
863
863
864 timer(d)
864 timer(d)
865 fm.end()
865 fm.end()
866
866
867
867
868 @command(b'perfancestorset', formatteropts)
868 @command(b'perfancestorset', formatteropts)
869 def perfancestorset(ui, repo, revset, **opts):
869 def perfancestorset(ui, repo, revset, **opts):
870 opts = _byteskwargs(opts)
870 opts = _byteskwargs(opts)
871 timer, fm = gettimer(ui, opts)
871 timer, fm = gettimer(ui, opts)
872 revs = repo.revs(revset)
872 revs = repo.revs(revset)
873 heads = repo.changelog.headrevs()
873 heads = repo.changelog.headrevs()
874
874
875 def d():
875 def d():
876 s = repo.changelog.ancestors(heads)
876 s = repo.changelog.ancestors(heads)
877 for rev in revs:
877 for rev in revs:
878 rev in s
878 rev in s
879
879
880 timer(d)
880 timer(d)
881 fm.end()
881 fm.end()
882
882
883
883
884 @command(b'perfdiscovery', formatteropts, b'PATH')
884 @command(b'perfdiscovery', formatteropts, b'PATH')
885 def perfdiscovery(ui, repo, path, **opts):
885 def perfdiscovery(ui, repo, path, **opts):
886 """benchmark discovery between local repo and the peer at given path
886 """benchmark discovery between local repo and the peer at given path
887 """
887 """
888 repos = [repo, None]
888 repos = [repo, None]
889 timer, fm = gettimer(ui, opts)
889 timer, fm = gettimer(ui, opts)
890 path = ui.expandpath(path)
890 path = ui.expandpath(path)
891
891
892 def s():
892 def s():
893 repos[1] = hg.peer(ui, opts, path)
893 repos[1] = hg.peer(ui, opts, path)
894
894
895 def d():
895 def d():
896 setdiscovery.findcommonheads(ui, *repos)
896 setdiscovery.findcommonheads(ui, *repos)
897
897
898 timer(d, setup=s)
898 timer(d, setup=s)
899 fm.end()
899 fm.end()
900
900
901
901
902 @command(
902 @command(
903 b'perfbookmarks',
903 b'perfbookmarks',
904 formatteropts
904 formatteropts
905 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
905 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
906 )
906 )
907 def perfbookmarks(ui, repo, **opts):
907 def perfbookmarks(ui, repo, **opts):
908 """benchmark parsing bookmarks from disk to memory"""
908 """benchmark parsing bookmarks from disk to memory"""
909 opts = _byteskwargs(opts)
909 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
910 timer, fm = gettimer(ui, opts)
911
911
912 clearrevlogs = opts[b'clear_revlogs']
912 clearrevlogs = opts[b'clear_revlogs']
913
913
914 def s():
914 def s():
915 if clearrevlogs:
915 if clearrevlogs:
916 clearchangelog(repo)
916 clearchangelog(repo)
917 clearfilecache(repo, b'_bookmarks')
917 clearfilecache(repo, b'_bookmarks')
918
918
919 def d():
919 def d():
920 repo._bookmarks
920 repo._bookmarks
921
921
922 timer(d, setup=s)
922 timer(d, setup=s)
923 fm.end()
923 fm.end()
924
924
925
925
926 @command(b'perfbundleread', formatteropts, b'BUNDLE')
926 @command(b'perfbundleread', formatteropts, b'BUNDLE')
927 def perfbundleread(ui, repo, bundlepath, **opts):
927 def perfbundleread(ui, repo, bundlepath, **opts):
928 """Benchmark reading of bundle files.
928 """Benchmark reading of bundle files.
929
929
930 This command is meant to isolate the I/O part of bundle reading as
930 This command is meant to isolate the I/O part of bundle reading as
931 much as possible.
931 much as possible.
932 """
932 """
933 from mercurial import (
933 from mercurial import (
934 bundle2,
934 bundle2,
935 exchange,
935 exchange,
936 streamclone,
936 streamclone,
937 )
937 )
938
938
939 opts = _byteskwargs(opts)
939 opts = _byteskwargs(opts)
940
940
941 def makebench(fn):
941 def makebench(fn):
942 def run():
942 def run():
943 with open(bundlepath, b'rb') as fh:
943 with open(bundlepath, b'rb') as fh:
944 bundle = exchange.readbundle(ui, fh, bundlepath)
944 bundle = exchange.readbundle(ui, fh, bundlepath)
945 fn(bundle)
945 fn(bundle)
946
946
947 return run
947 return run
948
948
949 def makereadnbytes(size):
949 def makereadnbytes(size):
950 def run():
950 def run():
951 with open(bundlepath, b'rb') as fh:
951 with open(bundlepath, b'rb') as fh:
952 bundle = exchange.readbundle(ui, fh, bundlepath)
952 bundle = exchange.readbundle(ui, fh, bundlepath)
953 while bundle.read(size):
953 while bundle.read(size):
954 pass
954 pass
955
955
956 return run
956 return run
957
957
958 def makestdioread(size):
958 def makestdioread(size):
959 def run():
959 def run():
960 with open(bundlepath, b'rb') as fh:
960 with open(bundlepath, b'rb') as fh:
961 while fh.read(size):
961 while fh.read(size):
962 pass
962 pass
963
963
964 return run
964 return run
965
965
966 # bundle1
966 # bundle1
967
967
968 def deltaiter(bundle):
968 def deltaiter(bundle):
969 for delta in bundle.deltaiter():
969 for delta in bundle.deltaiter():
970 pass
970 pass
971
971
972 def iterchunks(bundle):
972 def iterchunks(bundle):
973 for chunk in bundle.getchunks():
973 for chunk in bundle.getchunks():
974 pass
974 pass
975
975
976 # bundle2
976 # bundle2
977
977
978 def forwardchunks(bundle):
978 def forwardchunks(bundle):
979 for chunk in bundle._forwardchunks():
979 for chunk in bundle._forwardchunks():
980 pass
980 pass
981
981
982 def iterparts(bundle):
982 def iterparts(bundle):
983 for part in bundle.iterparts():
983 for part in bundle.iterparts():
984 pass
984 pass
985
985
986 def iterpartsseekable(bundle):
986 def iterpartsseekable(bundle):
987 for part in bundle.iterparts(seekable=True):
987 for part in bundle.iterparts(seekable=True):
988 pass
988 pass
989
989
990 def seek(bundle):
990 def seek(bundle):
991 for part in bundle.iterparts(seekable=True):
991 for part in bundle.iterparts(seekable=True):
992 part.seek(0, os.SEEK_END)
992 part.seek(0, os.SEEK_END)
993
993
994 def makepartreadnbytes(size):
994 def makepartreadnbytes(size):
995 def run():
995 def run():
996 with open(bundlepath, b'rb') as fh:
996 with open(bundlepath, b'rb') as fh:
997 bundle = exchange.readbundle(ui, fh, bundlepath)
997 bundle = exchange.readbundle(ui, fh, bundlepath)
998 for part in bundle.iterparts():
998 for part in bundle.iterparts():
999 while part.read(size):
999 while part.read(size):
1000 pass
1000 pass
1001
1001
1002 return run
1002 return run
1003
1003
1004 benches = [
1004 benches = [
1005 (makestdioread(8192), b'read(8k)'),
1005 (makestdioread(8192), b'read(8k)'),
1006 (makestdioread(16384), b'read(16k)'),
1006 (makestdioread(16384), b'read(16k)'),
1007 (makestdioread(32768), b'read(32k)'),
1007 (makestdioread(32768), b'read(32k)'),
1008 (makestdioread(131072), b'read(128k)'),
1008 (makestdioread(131072), b'read(128k)'),
1009 ]
1009 ]
1010
1010
1011 with open(bundlepath, b'rb') as fh:
1011 with open(bundlepath, b'rb') as fh:
1012 bundle = exchange.readbundle(ui, fh, bundlepath)
1012 bundle = exchange.readbundle(ui, fh, bundlepath)
1013
1013
1014 if isinstance(bundle, changegroup.cg1unpacker):
1014 if isinstance(bundle, changegroup.cg1unpacker):
1015 benches.extend(
1015 benches.extend(
1016 [
1016 [
1017 (makebench(deltaiter), b'cg1 deltaiter()'),
1017 (makebench(deltaiter), b'cg1 deltaiter()'),
1018 (makebench(iterchunks), b'cg1 getchunks()'),
1018 (makebench(iterchunks), b'cg1 getchunks()'),
1019 (makereadnbytes(8192), b'cg1 read(8k)'),
1019 (makereadnbytes(8192), b'cg1 read(8k)'),
1020 (makereadnbytes(16384), b'cg1 read(16k)'),
1020 (makereadnbytes(16384), b'cg1 read(16k)'),
1021 (makereadnbytes(32768), b'cg1 read(32k)'),
1021 (makereadnbytes(32768), b'cg1 read(32k)'),
1022 (makereadnbytes(131072), b'cg1 read(128k)'),
1022 (makereadnbytes(131072), b'cg1 read(128k)'),
1023 ]
1023 ]
1024 )
1024 )
1025 elif isinstance(bundle, bundle2.unbundle20):
1025 elif isinstance(bundle, bundle2.unbundle20):
1026 benches.extend(
1026 benches.extend(
1027 [
1027 [
1028 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1028 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1029 (makebench(iterparts), b'bundle2 iterparts()'),
1029 (makebench(iterparts), b'bundle2 iterparts()'),
1030 (
1030 (
1031 makebench(iterpartsseekable),
1031 makebench(iterpartsseekable),
1032 b'bundle2 iterparts() seekable',
1032 b'bundle2 iterparts() seekable',
1033 ),
1033 ),
1034 (makebench(seek), b'bundle2 part seek()'),
1034 (makebench(seek), b'bundle2 part seek()'),
1035 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1035 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1036 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1036 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1037 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1037 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1038 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1038 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1039 ]
1039 ]
1040 )
1040 )
1041 elif isinstance(bundle, streamclone.streamcloneapplier):
1041 elif isinstance(bundle, streamclone.streamcloneapplier):
1042 raise error.Abort(b'stream clone bundles not supported')
1042 raise error.Abort(b'stream clone bundles not supported')
1043 else:
1043 else:
1044 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1044 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1045
1045
1046 for fn, title in benches:
1046 for fn, title in benches:
1047 timer, fm = gettimer(ui, opts)
1047 timer, fm = gettimer(ui, opts)
1048 timer(fn, title=title)
1048 timer(fn, title=title)
1049 fm.end()
1049 fm.end()
1050
1050
1051
1051
1052 @command(
1052 @command(
1053 b'perfchangegroupchangelog',
1053 b'perfchangegroupchangelog',
1054 formatteropts
1054 formatteropts
1055 + [
1055 + [
1056 (b'', b'cgversion', b'02', b'changegroup version'),
1056 (b'', b'cgversion', b'02', b'changegroup version'),
1057 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1057 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1058 ],
1058 ],
1059 )
1059 )
1060 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1060 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1061 """Benchmark producing a changelog group for a changegroup.
1061 """Benchmark producing a changelog group for a changegroup.
1062
1062
1063 This measures the time spent processing the changelog during a
1063 This measures the time spent processing the changelog during a
1064 bundle operation. This occurs during `hg bundle` and on a server
1064 bundle operation. This occurs during `hg bundle` and on a server
1065 processing a `getbundle` wire protocol request (handles clones
1065 processing a `getbundle` wire protocol request (handles clones
1066 and pull requests).
1066 and pull requests).
1067
1067
1068 By default, all revisions are added to the changegroup.
1068 By default, all revisions are added to the changegroup.
1069 """
1069 """
1070 opts = _byteskwargs(opts)
1070 opts = _byteskwargs(opts)
1071 cl = repo.changelog
1071 cl = repo.changelog
1072 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1072 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1073 bundler = changegroup.getbundler(cgversion, repo)
1073 bundler = changegroup.getbundler(cgversion, repo)
1074
1074
1075 def d():
1075 def d():
1076 state, chunks = bundler._generatechangelog(cl, nodes)
1076 state, chunks = bundler._generatechangelog(cl, nodes)
1077 for chunk in chunks:
1077 for chunk in chunks:
1078 pass
1078 pass
1079
1079
1080 timer, fm = gettimer(ui, opts)
1080 timer, fm = gettimer(ui, opts)
1081
1081
1082 # Terminal printing can interfere with timing. So disable it.
1082 # Terminal printing can interfere with timing. So disable it.
1083 with ui.configoverride({(b'progress', b'disable'): True}):
1083 with ui.configoverride({(b'progress', b'disable'): True}):
1084 timer(d)
1084 timer(d)
1085
1085
1086 fm.end()
1086 fm.end()
1087
1087
1088
1088
1089 @command(b'perfdirs', formatteropts)
1089 @command(b'perfdirs', formatteropts)
1090 def perfdirs(ui, repo, **opts):
1090 def perfdirs(ui, repo, **opts):
1091 opts = _byteskwargs(opts)
1091 opts = _byteskwargs(opts)
1092 timer, fm = gettimer(ui, opts)
1092 timer, fm = gettimer(ui, opts)
1093 dirstate = repo.dirstate
1093 dirstate = repo.dirstate
1094 b'a' in dirstate
1094 b'a' in dirstate
1095
1095
1096 def d():
1096 def d():
1097 dirstate.hasdir(b'a')
1097 dirstate.hasdir(b'a')
1098 del dirstate._map._dirs
1098 del dirstate._map._dirs
1099
1099
1100 timer(d)
1100 timer(d)
1101 fm.end()
1101 fm.end()
1102
1102
1103
1103
1104 @command(b'perfdirstate', formatteropts)
1104 @command(b'perfdirstate', formatteropts)
1105 def perfdirstate(ui, repo, **opts):
1105 def perfdirstate(ui, repo, **opts):
1106 """benchmap the time necessary to load a dirstate from scratch
1106 """benchmap the time necessary to load a dirstate from scratch
1107
1107
1108 The dirstate is loaded to the point were a "contains" request can be
1108 The dirstate is loaded to the point were a "contains" request can be
1109 answered.
1109 answered.
1110 """
1110 """
1111 opts = _byteskwargs(opts)
1111 opts = _byteskwargs(opts)
1112 timer, fm = gettimer(ui, opts)
1112 timer, fm = gettimer(ui, opts)
1113 b"a" in repo.dirstate
1113 b"a" in repo.dirstate
1114
1114
1115 def setup():
1115 def setup():
1116 repo.dirstate.invalidate()
1116 repo.dirstate.invalidate()
1117
1117
1118 def d():
1118 def d():
1119 b"a" in repo.dirstate
1119 b"a" in repo.dirstate
1120
1120
1121 timer(d, setup=setup)
1121 timer(d, setup=setup)
1122 fm.end()
1122 fm.end()
1123
1123
1124
1124
1125 @command(b'perfdirstatedirs', formatteropts)
1125 @command(b'perfdirstatedirs', formatteropts)
1126 def perfdirstatedirs(ui, repo, **opts):
1126 def perfdirstatedirs(ui, repo, **opts):
1127 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
1127 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
1128 """
1128 """
1129 opts = _byteskwargs(opts)
1129 opts = _byteskwargs(opts)
1130 timer, fm = gettimer(ui, opts)
1130 timer, fm = gettimer(ui, opts)
1131 repo.dirstate.hasdir(b"a")
1131 repo.dirstate.hasdir(b"a")
1132
1132
1133 def setup():
1133 def setup():
1134 del repo.dirstate._map._dirs
1134 del repo.dirstate._map._dirs
1135
1135
1136 def d():
1136 def d():
1137 repo.dirstate.hasdir(b"a")
1137 repo.dirstate.hasdir(b"a")
1138
1138
1139 timer(d, setup=setup)
1139 timer(d, setup=setup)
1140 fm.end()
1140 fm.end()
1141
1141
1142
1142
1143 @command(b'perfdirstatefoldmap', formatteropts)
1143 @command(b'perfdirstatefoldmap', formatteropts)
1144 def perfdirstatefoldmap(ui, repo, **opts):
1144 def perfdirstatefoldmap(ui, repo, **opts):
1145 """benchmap a `dirstate._map.filefoldmap.get()` request
1145 """benchmap a `dirstate._map.filefoldmap.get()` request
1146
1146
1147 The dirstate filefoldmap cache is dropped between every request.
1147 The dirstate filefoldmap cache is dropped between every request.
1148 """
1148 """
1149 opts = _byteskwargs(opts)
1149 opts = _byteskwargs(opts)
1150 timer, fm = gettimer(ui, opts)
1150 timer, fm = gettimer(ui, opts)
1151 dirstate = repo.dirstate
1151 dirstate = repo.dirstate
1152 dirstate._map.filefoldmap.get(b'a')
1152 dirstate._map.filefoldmap.get(b'a')
1153
1153
1154 def setup():
1154 def setup():
1155 del dirstate._map.filefoldmap
1155 del dirstate._map.filefoldmap
1156
1156
1157 def d():
1157 def d():
1158 dirstate._map.filefoldmap.get(b'a')
1158 dirstate._map.filefoldmap.get(b'a')
1159
1159
1160 timer(d, setup=setup)
1160 timer(d, setup=setup)
1161 fm.end()
1161 fm.end()
1162
1162
1163
1163
1164 @command(b'perfdirfoldmap', formatteropts)
1164 @command(b'perfdirfoldmap', formatteropts)
1165 def perfdirfoldmap(ui, repo, **opts):
1165 def perfdirfoldmap(ui, repo, **opts):
1166 """benchmap a `dirstate._map.dirfoldmap.get()` request
1167
1168 The dirstate dirfoldmap cache is dropped between every request.
1169 """
1166 opts = _byteskwargs(opts)
1170 opts = _byteskwargs(opts)
1167 timer, fm = gettimer(ui, opts)
1171 timer, fm = gettimer(ui, opts)
1168 dirstate = repo.dirstate
1172 dirstate = repo.dirstate
1169 b'a' in dirstate
1173 b'a' in dirstate
1170
1174
1171 def d():
1175 def d():
1172 dirstate._map.dirfoldmap.get(b'a')
1176 dirstate._map.dirfoldmap.get(b'a')
1173 del dirstate._map.dirfoldmap
1177 del dirstate._map.dirfoldmap
1174 del dirstate._map._dirs
1178 del dirstate._map._dirs
1175
1179
1176 timer(d)
1180 timer(d)
1177 fm.end()
1181 fm.end()
1178
1182
1179
1183
1180 @command(b'perfdirstatewrite', formatteropts)
1184 @command(b'perfdirstatewrite', formatteropts)
1181 def perfdirstatewrite(ui, repo, **opts):
1185 def perfdirstatewrite(ui, repo, **opts):
1182 opts = _byteskwargs(opts)
1186 opts = _byteskwargs(opts)
1183 timer, fm = gettimer(ui, opts)
1187 timer, fm = gettimer(ui, opts)
1184 ds = repo.dirstate
1188 ds = repo.dirstate
1185 b"a" in ds
1189 b"a" in ds
1186
1190
1187 def d():
1191 def d():
1188 ds._dirty = True
1192 ds._dirty = True
1189 ds.write(repo.currenttransaction())
1193 ds.write(repo.currenttransaction())
1190
1194
1191 timer(d)
1195 timer(d)
1192 fm.end()
1196 fm.end()
1193
1197
1194
1198
1195 def _getmergerevs(repo, opts):
1199 def _getmergerevs(repo, opts):
1196 """parse command argument to return rev involved in merge
1200 """parse command argument to return rev involved in merge
1197
1201
1198 input: options dictionnary with `rev`, `from` and `bse`
1202 input: options dictionnary with `rev`, `from` and `bse`
1199 output: (localctx, otherctx, basectx)
1203 output: (localctx, otherctx, basectx)
1200 """
1204 """
1201 if opts[b'from']:
1205 if opts[b'from']:
1202 fromrev = scmutil.revsingle(repo, opts[b'from'])
1206 fromrev = scmutil.revsingle(repo, opts[b'from'])
1203 wctx = repo[fromrev]
1207 wctx = repo[fromrev]
1204 else:
1208 else:
1205 wctx = repo[None]
1209 wctx = repo[None]
1206 # we don't want working dir files to be stat'd in the benchmark, so
1210 # we don't want working dir files to be stat'd in the benchmark, so
1207 # prime that cache
1211 # prime that cache
1208 wctx.dirty()
1212 wctx.dirty()
1209 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1213 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1210 if opts[b'base']:
1214 if opts[b'base']:
1211 fromrev = scmutil.revsingle(repo, opts[b'base'])
1215 fromrev = scmutil.revsingle(repo, opts[b'base'])
1212 ancestor = repo[fromrev]
1216 ancestor = repo[fromrev]
1213 else:
1217 else:
1214 ancestor = wctx.ancestor(rctx)
1218 ancestor = wctx.ancestor(rctx)
1215 return (wctx, rctx, ancestor)
1219 return (wctx, rctx, ancestor)
1216
1220
1217
1221
1218 @command(
1222 @command(
1219 b'perfmergecalculate',
1223 b'perfmergecalculate',
1220 [
1224 [
1221 (b'r', b'rev', b'.', b'rev to merge against'),
1225 (b'r', b'rev', b'.', b'rev to merge against'),
1222 (b'', b'from', b'', b'rev to merge from'),
1226 (b'', b'from', b'', b'rev to merge from'),
1223 (b'', b'base', b'', b'the revision to use as base'),
1227 (b'', b'base', b'', b'the revision to use as base'),
1224 ]
1228 ]
1225 + formatteropts,
1229 + formatteropts,
1226 )
1230 )
1227 def perfmergecalculate(ui, repo, **opts):
1231 def perfmergecalculate(ui, repo, **opts):
1228 opts = _byteskwargs(opts)
1232 opts = _byteskwargs(opts)
1229 timer, fm = gettimer(ui, opts)
1233 timer, fm = gettimer(ui, opts)
1230
1234
1231 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1235 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1232
1236
1233 def d():
1237 def d():
1234 # acceptremote is True because we don't want prompts in the middle of
1238 # acceptremote is True because we don't want prompts in the middle of
1235 # our benchmark
1239 # our benchmark
1236 merge.calculateupdates(
1240 merge.calculateupdates(
1237 repo,
1241 repo,
1238 wctx,
1242 wctx,
1239 rctx,
1243 rctx,
1240 [ancestor],
1244 [ancestor],
1241 branchmerge=False,
1245 branchmerge=False,
1242 force=False,
1246 force=False,
1243 acceptremote=True,
1247 acceptremote=True,
1244 followcopies=True,
1248 followcopies=True,
1245 )
1249 )
1246
1250
1247 timer(d)
1251 timer(d)
1248 fm.end()
1252 fm.end()
1249
1253
1250
1254
1251 @command(
1255 @command(
1252 b'perfmergecopies',
1256 b'perfmergecopies',
1253 [
1257 [
1254 (b'r', b'rev', b'.', b'rev to merge against'),
1258 (b'r', b'rev', b'.', b'rev to merge against'),
1255 (b'', b'from', b'', b'rev to merge from'),
1259 (b'', b'from', b'', b'rev to merge from'),
1256 (b'', b'base', b'', b'the revision to use as base'),
1260 (b'', b'base', b'', b'the revision to use as base'),
1257 ]
1261 ]
1258 + formatteropts,
1262 + formatteropts,
1259 )
1263 )
1260 def perfmergecopies(ui, repo, **opts):
1264 def perfmergecopies(ui, repo, **opts):
1261 """measure runtime of `copies.mergecopies`"""
1265 """measure runtime of `copies.mergecopies`"""
1262 opts = _byteskwargs(opts)
1266 opts = _byteskwargs(opts)
1263 timer, fm = gettimer(ui, opts)
1267 timer, fm = gettimer(ui, opts)
1264 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1268 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1265
1269
1266 def d():
1270 def d():
1267 # acceptremote is True because we don't want prompts in the middle of
1271 # acceptremote is True because we don't want prompts in the middle of
1268 # our benchmark
1272 # our benchmark
1269 copies.mergecopies(repo, wctx, rctx, ancestor)
1273 copies.mergecopies(repo, wctx, rctx, ancestor)
1270
1274
1271 timer(d)
1275 timer(d)
1272 fm.end()
1276 fm.end()
1273
1277
1274
1278
1275 @command(b'perfpathcopies', [], b"REV REV")
1279 @command(b'perfpathcopies', [], b"REV REV")
1276 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1280 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1277 """benchmark the copy tracing logic"""
1281 """benchmark the copy tracing logic"""
1278 opts = _byteskwargs(opts)
1282 opts = _byteskwargs(opts)
1279 timer, fm = gettimer(ui, opts)
1283 timer, fm = gettimer(ui, opts)
1280 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1284 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1281 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1285 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1282
1286
1283 def d():
1287 def d():
1284 copies.pathcopies(ctx1, ctx2)
1288 copies.pathcopies(ctx1, ctx2)
1285
1289
1286 timer(d)
1290 timer(d)
1287 fm.end()
1291 fm.end()
1288
1292
1289
1293
1290 @command(
1294 @command(
1291 b'perfphases',
1295 b'perfphases',
1292 [(b'', b'full', False, b'include file reading time too'),],
1296 [(b'', b'full', False, b'include file reading time too'),],
1293 b"",
1297 b"",
1294 )
1298 )
1295 def perfphases(ui, repo, **opts):
1299 def perfphases(ui, repo, **opts):
1296 """benchmark phasesets computation"""
1300 """benchmark phasesets computation"""
1297 opts = _byteskwargs(opts)
1301 opts = _byteskwargs(opts)
1298 timer, fm = gettimer(ui, opts)
1302 timer, fm = gettimer(ui, opts)
1299 _phases = repo._phasecache
1303 _phases = repo._phasecache
1300 full = opts.get(b'full')
1304 full = opts.get(b'full')
1301
1305
1302 def d():
1306 def d():
1303 phases = _phases
1307 phases = _phases
1304 if full:
1308 if full:
1305 clearfilecache(repo, b'_phasecache')
1309 clearfilecache(repo, b'_phasecache')
1306 phases = repo._phasecache
1310 phases = repo._phasecache
1307 phases.invalidate()
1311 phases.invalidate()
1308 phases.loadphaserevs(repo)
1312 phases.loadphaserevs(repo)
1309
1313
1310 timer(d)
1314 timer(d)
1311 fm.end()
1315 fm.end()
1312
1316
1313
1317
1314 @command(b'perfphasesremote', [], b"[DEST]")
1318 @command(b'perfphasesremote', [], b"[DEST]")
1315 def perfphasesremote(ui, repo, dest=None, **opts):
1319 def perfphasesremote(ui, repo, dest=None, **opts):
1316 """benchmark time needed to analyse phases of the remote server"""
1320 """benchmark time needed to analyse phases of the remote server"""
1317 from mercurial.node import bin
1321 from mercurial.node import bin
1318 from mercurial import (
1322 from mercurial import (
1319 exchange,
1323 exchange,
1320 hg,
1324 hg,
1321 phases,
1325 phases,
1322 )
1326 )
1323
1327
1324 opts = _byteskwargs(opts)
1328 opts = _byteskwargs(opts)
1325 timer, fm = gettimer(ui, opts)
1329 timer, fm = gettimer(ui, opts)
1326
1330
1327 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1331 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1328 if not path:
1332 if not path:
1329 raise error.Abort(
1333 raise error.Abort(
1330 b'default repository not configured!',
1334 b'default repository not configured!',
1331 hint=b"see 'hg help config.paths'",
1335 hint=b"see 'hg help config.paths'",
1332 )
1336 )
1333 dest = path.pushloc or path.loc
1337 dest = path.pushloc or path.loc
1334 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1338 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1335 other = hg.peer(repo, opts, dest)
1339 other = hg.peer(repo, opts, dest)
1336
1340
1337 # easier to perform discovery through the operation
1341 # easier to perform discovery through the operation
1338 op = exchange.pushoperation(repo, other)
1342 op = exchange.pushoperation(repo, other)
1339 exchange._pushdiscoverychangeset(op)
1343 exchange._pushdiscoverychangeset(op)
1340
1344
1341 remotesubset = op.fallbackheads
1345 remotesubset = op.fallbackheads
1342
1346
1343 with other.commandexecutor() as e:
1347 with other.commandexecutor() as e:
1344 remotephases = e.callcommand(
1348 remotephases = e.callcommand(
1345 b'listkeys', {b'namespace': b'phases'}
1349 b'listkeys', {b'namespace': b'phases'}
1346 ).result()
1350 ).result()
1347 del other
1351 del other
1348 publishing = remotephases.get(b'publishing', False)
1352 publishing = remotephases.get(b'publishing', False)
1349 if publishing:
1353 if publishing:
1350 ui.statusnoi18n(b'publishing: yes\n')
1354 ui.statusnoi18n(b'publishing: yes\n')
1351 else:
1355 else:
1352 ui.statusnoi18n(b'publishing: no\n')
1356 ui.statusnoi18n(b'publishing: no\n')
1353
1357
1354 nodemap = repo.changelog.nodemap
1358 nodemap = repo.changelog.nodemap
1355 nonpublishroots = 0
1359 nonpublishroots = 0
1356 for nhex, phase in remotephases.iteritems():
1360 for nhex, phase in remotephases.iteritems():
1357 if nhex == b'publishing': # ignore data related to publish option
1361 if nhex == b'publishing': # ignore data related to publish option
1358 continue
1362 continue
1359 node = bin(nhex)
1363 node = bin(nhex)
1360 if node in nodemap and int(phase):
1364 if node in nodemap and int(phase):
1361 nonpublishroots += 1
1365 nonpublishroots += 1
1362 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1366 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1363 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1367 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1364
1368
1365 def d():
1369 def d():
1366 phases.remotephasessummary(repo, remotesubset, remotephases)
1370 phases.remotephasessummary(repo, remotesubset, remotephases)
1367
1371
1368 timer(d)
1372 timer(d)
1369 fm.end()
1373 fm.end()
1370
1374
1371
1375
1372 @command(
1376 @command(
1373 b'perfmanifest',
1377 b'perfmanifest',
1374 [
1378 [
1375 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1379 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1376 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1380 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1377 ]
1381 ]
1378 + formatteropts,
1382 + formatteropts,
1379 b'REV|NODE',
1383 b'REV|NODE',
1380 )
1384 )
1381 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1385 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1382 """benchmark the time to read a manifest from disk and return a usable
1386 """benchmark the time to read a manifest from disk and return a usable
1383 dict-like object
1387 dict-like object
1384
1388
1385 Manifest caches are cleared before retrieval."""
1389 Manifest caches are cleared before retrieval."""
1386 opts = _byteskwargs(opts)
1390 opts = _byteskwargs(opts)
1387 timer, fm = gettimer(ui, opts)
1391 timer, fm = gettimer(ui, opts)
1388 if not manifest_rev:
1392 if not manifest_rev:
1389 ctx = scmutil.revsingle(repo, rev, rev)
1393 ctx = scmutil.revsingle(repo, rev, rev)
1390 t = ctx.manifestnode()
1394 t = ctx.manifestnode()
1391 else:
1395 else:
1392 from mercurial.node import bin
1396 from mercurial.node import bin
1393
1397
1394 if len(rev) == 40:
1398 if len(rev) == 40:
1395 t = bin(rev)
1399 t = bin(rev)
1396 else:
1400 else:
1397 try:
1401 try:
1398 rev = int(rev)
1402 rev = int(rev)
1399
1403
1400 if util.safehasattr(repo.manifestlog, b'getstorage'):
1404 if util.safehasattr(repo.manifestlog, b'getstorage'):
1401 t = repo.manifestlog.getstorage(b'').node(rev)
1405 t = repo.manifestlog.getstorage(b'').node(rev)
1402 else:
1406 else:
1403 t = repo.manifestlog._revlog.lookup(rev)
1407 t = repo.manifestlog._revlog.lookup(rev)
1404 except ValueError:
1408 except ValueError:
1405 raise error.Abort(
1409 raise error.Abort(
1406 b'manifest revision must be integer or full node'
1410 b'manifest revision must be integer or full node'
1407 )
1411 )
1408
1412
1409 def d():
1413 def d():
1410 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1414 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1411 repo.manifestlog[t].read()
1415 repo.manifestlog[t].read()
1412
1416
1413 timer(d)
1417 timer(d)
1414 fm.end()
1418 fm.end()
1415
1419
1416
1420
1417 @command(b'perfchangeset', formatteropts)
1421 @command(b'perfchangeset', formatteropts)
1418 def perfchangeset(ui, repo, rev, **opts):
1422 def perfchangeset(ui, repo, rev, **opts):
1419 opts = _byteskwargs(opts)
1423 opts = _byteskwargs(opts)
1420 timer, fm = gettimer(ui, opts)
1424 timer, fm = gettimer(ui, opts)
1421 n = scmutil.revsingle(repo, rev).node()
1425 n = scmutil.revsingle(repo, rev).node()
1422
1426
1423 def d():
1427 def d():
1424 repo.changelog.read(n)
1428 repo.changelog.read(n)
1425 # repo.changelog._cache = None
1429 # repo.changelog._cache = None
1426
1430
1427 timer(d)
1431 timer(d)
1428 fm.end()
1432 fm.end()
1429
1433
1430
1434
1431 @command(b'perfignore', formatteropts)
1435 @command(b'perfignore', formatteropts)
1432 def perfignore(ui, repo, **opts):
1436 def perfignore(ui, repo, **opts):
1433 """benchmark operation related to computing ignore"""
1437 """benchmark operation related to computing ignore"""
1434 opts = _byteskwargs(opts)
1438 opts = _byteskwargs(opts)
1435 timer, fm = gettimer(ui, opts)
1439 timer, fm = gettimer(ui, opts)
1436 dirstate = repo.dirstate
1440 dirstate = repo.dirstate
1437
1441
1438 def setupone():
1442 def setupone():
1439 dirstate.invalidate()
1443 dirstate.invalidate()
1440 clearfilecache(dirstate, b'_ignore')
1444 clearfilecache(dirstate, b'_ignore')
1441
1445
1442 def runone():
1446 def runone():
1443 dirstate._ignore
1447 dirstate._ignore
1444
1448
1445 timer(runone, setup=setupone, title=b"load")
1449 timer(runone, setup=setupone, title=b"load")
1446 fm.end()
1450 fm.end()
1447
1451
1448
1452
1449 @command(
1453 @command(
1450 b'perfindex',
1454 b'perfindex',
1451 [
1455 [
1452 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1456 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1453 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1457 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1454 ]
1458 ]
1455 + formatteropts,
1459 + formatteropts,
1456 )
1460 )
1457 def perfindex(ui, repo, **opts):
1461 def perfindex(ui, repo, **opts):
1458 """benchmark index creation time followed by a lookup
1462 """benchmark index creation time followed by a lookup
1459
1463
1460 The default is to look `tip` up. Depending on the index implementation,
1464 The default is to look `tip` up. Depending on the index implementation,
1461 the revision looked up can matters. For example, an implementation
1465 the revision looked up can matters. For example, an implementation
1462 scanning the index will have a faster lookup time for `--rev tip` than for
1466 scanning the index will have a faster lookup time for `--rev tip` than for
1463 `--rev 0`. The number of looked up revisions and their order can also
1467 `--rev 0`. The number of looked up revisions and their order can also
1464 matters.
1468 matters.
1465
1469
1466 Example of useful set to test:
1470 Example of useful set to test:
1467 * tip
1471 * tip
1468 * 0
1472 * 0
1469 * -10:
1473 * -10:
1470 * :10
1474 * :10
1471 * -10: + :10
1475 * -10: + :10
1472 * :10: + -10:
1476 * :10: + -10:
1473 * -10000:
1477 * -10000:
1474 * -10000: + 0
1478 * -10000: + 0
1475
1479
1476 It is not currently possible to check for lookup of a missing node. For
1480 It is not currently possible to check for lookup of a missing node. For
1477 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1481 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1478 import mercurial.revlog
1482 import mercurial.revlog
1479
1483
1480 opts = _byteskwargs(opts)
1484 opts = _byteskwargs(opts)
1481 timer, fm = gettimer(ui, opts)
1485 timer, fm = gettimer(ui, opts)
1482 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1486 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1483 if opts[b'no_lookup']:
1487 if opts[b'no_lookup']:
1484 if opts['rev']:
1488 if opts['rev']:
1485 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1489 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1486 nodes = []
1490 nodes = []
1487 elif not opts[b'rev']:
1491 elif not opts[b'rev']:
1488 nodes = [repo[b"tip"].node()]
1492 nodes = [repo[b"tip"].node()]
1489 else:
1493 else:
1490 revs = scmutil.revrange(repo, opts[b'rev'])
1494 revs = scmutil.revrange(repo, opts[b'rev'])
1491 cl = repo.changelog
1495 cl = repo.changelog
1492 nodes = [cl.node(r) for r in revs]
1496 nodes = [cl.node(r) for r in revs]
1493
1497
1494 unfi = repo.unfiltered()
1498 unfi = repo.unfiltered()
1495 # find the filecache func directly
1499 # find the filecache func directly
1496 # This avoid polluting the benchmark with the filecache logic
1500 # This avoid polluting the benchmark with the filecache logic
1497 makecl = unfi.__class__.changelog.func
1501 makecl = unfi.__class__.changelog.func
1498
1502
1499 def setup():
1503 def setup():
1500 # probably not necessary, but for good measure
1504 # probably not necessary, but for good measure
1501 clearchangelog(unfi)
1505 clearchangelog(unfi)
1502
1506
1503 def d():
1507 def d():
1504 cl = makecl(unfi)
1508 cl = makecl(unfi)
1505 for n in nodes:
1509 for n in nodes:
1506 cl.rev(n)
1510 cl.rev(n)
1507
1511
1508 timer(d, setup=setup)
1512 timer(d, setup=setup)
1509 fm.end()
1513 fm.end()
1510
1514
1511
1515
1512 @command(
1516 @command(
1513 b'perfnodemap',
1517 b'perfnodemap',
1514 [
1518 [
1515 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1519 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1516 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1520 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1517 ]
1521 ]
1518 + formatteropts,
1522 + formatteropts,
1519 )
1523 )
1520 def perfnodemap(ui, repo, **opts):
1524 def perfnodemap(ui, repo, **opts):
1521 """benchmark the time necessary to look up revision from a cold nodemap
1525 """benchmark the time necessary to look up revision from a cold nodemap
1522
1526
1523 Depending on the implementation, the amount and order of revision we look
1527 Depending on the implementation, the amount and order of revision we look
1524 up can varies. Example of useful set to test:
1528 up can varies. Example of useful set to test:
1525 * tip
1529 * tip
1526 * 0
1530 * 0
1527 * -10:
1531 * -10:
1528 * :10
1532 * :10
1529 * -10: + :10
1533 * -10: + :10
1530 * :10: + -10:
1534 * :10: + -10:
1531 * -10000:
1535 * -10000:
1532 * -10000: + 0
1536 * -10000: + 0
1533
1537
1534 The command currently focus on valid binary lookup. Benchmarking for
1538 The command currently focus on valid binary lookup. Benchmarking for
1535 hexlookup, prefix lookup and missing lookup would also be valuable.
1539 hexlookup, prefix lookup and missing lookup would also be valuable.
1536 """
1540 """
1537 import mercurial.revlog
1541 import mercurial.revlog
1538
1542
1539 opts = _byteskwargs(opts)
1543 opts = _byteskwargs(opts)
1540 timer, fm = gettimer(ui, opts)
1544 timer, fm = gettimer(ui, opts)
1541 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1545 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1542
1546
1543 unfi = repo.unfiltered()
1547 unfi = repo.unfiltered()
1544 clearcaches = opts['clear_caches']
1548 clearcaches = opts['clear_caches']
1545 # find the filecache func directly
1549 # find the filecache func directly
1546 # This avoid polluting the benchmark with the filecache logic
1550 # This avoid polluting the benchmark with the filecache logic
1547 makecl = unfi.__class__.changelog.func
1551 makecl = unfi.__class__.changelog.func
1548 if not opts[b'rev']:
1552 if not opts[b'rev']:
1549 raise error.Abort('use --rev to specify revisions to look up')
1553 raise error.Abort('use --rev to specify revisions to look up')
1550 revs = scmutil.revrange(repo, opts[b'rev'])
1554 revs = scmutil.revrange(repo, opts[b'rev'])
1551 cl = repo.changelog
1555 cl = repo.changelog
1552 nodes = [cl.node(r) for r in revs]
1556 nodes = [cl.node(r) for r in revs]
1553
1557
1554 # use a list to pass reference to a nodemap from one closure to the next
1558 # use a list to pass reference to a nodemap from one closure to the next
1555 nodeget = [None]
1559 nodeget = [None]
1556
1560
1557 def setnodeget():
1561 def setnodeget():
1558 # probably not necessary, but for good measure
1562 # probably not necessary, but for good measure
1559 clearchangelog(unfi)
1563 clearchangelog(unfi)
1560 nodeget[0] = makecl(unfi).nodemap.get
1564 nodeget[0] = makecl(unfi).nodemap.get
1561
1565
1562 def d():
1566 def d():
1563 get = nodeget[0]
1567 get = nodeget[0]
1564 for n in nodes:
1568 for n in nodes:
1565 get(n)
1569 get(n)
1566
1570
1567 setup = None
1571 setup = None
1568 if clearcaches:
1572 if clearcaches:
1569
1573
1570 def setup():
1574 def setup():
1571 setnodeget()
1575 setnodeget()
1572
1576
1573 else:
1577 else:
1574 setnodeget()
1578 setnodeget()
1575 d() # prewarm the data structure
1579 d() # prewarm the data structure
1576 timer(d, setup=setup)
1580 timer(d, setup=setup)
1577 fm.end()
1581 fm.end()
1578
1582
1579
1583
1580 @command(b'perfstartup', formatteropts)
1584 @command(b'perfstartup', formatteropts)
1581 def perfstartup(ui, repo, **opts):
1585 def perfstartup(ui, repo, **opts):
1582 opts = _byteskwargs(opts)
1586 opts = _byteskwargs(opts)
1583 timer, fm = gettimer(ui, opts)
1587 timer, fm = gettimer(ui, opts)
1584
1588
1585 def d():
1589 def d():
1586 if os.name != r'nt':
1590 if os.name != r'nt':
1587 os.system(
1591 os.system(
1588 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1592 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1589 )
1593 )
1590 else:
1594 else:
1591 os.environ[r'HGRCPATH'] = r' '
1595 os.environ[r'HGRCPATH'] = r' '
1592 os.system(r"%s version -q > NUL" % sys.argv[0])
1596 os.system(r"%s version -q > NUL" % sys.argv[0])
1593
1597
1594 timer(d)
1598 timer(d)
1595 fm.end()
1599 fm.end()
1596
1600
1597
1601
1598 @command(b'perfparents', formatteropts)
1602 @command(b'perfparents', formatteropts)
1599 def perfparents(ui, repo, **opts):
1603 def perfparents(ui, repo, **opts):
1600 """benchmark the time necessary to fetch one changeset's parents.
1604 """benchmark the time necessary to fetch one changeset's parents.
1601
1605
1602 The fetch is done using the `node identifier`, traversing all object layers
1606 The fetch is done using the `node identifier`, traversing all object layers
1603 from the repository object. The first N revisions will be used for this
1607 from the repository object. The first N revisions will be used for this
1604 benchmark. N is controlled by the ``perf.parentscount`` config option
1608 benchmark. N is controlled by the ``perf.parentscount`` config option
1605 (default: 1000).
1609 (default: 1000).
1606 """
1610 """
1607 opts = _byteskwargs(opts)
1611 opts = _byteskwargs(opts)
1608 timer, fm = gettimer(ui, opts)
1612 timer, fm = gettimer(ui, opts)
1609 # control the number of commits perfparents iterates over
1613 # control the number of commits perfparents iterates over
1610 # experimental config: perf.parentscount
1614 # experimental config: perf.parentscount
1611 count = getint(ui, b"perf", b"parentscount", 1000)
1615 count = getint(ui, b"perf", b"parentscount", 1000)
1612 if len(repo.changelog) < count:
1616 if len(repo.changelog) < count:
1613 raise error.Abort(b"repo needs %d commits for this test" % count)
1617 raise error.Abort(b"repo needs %d commits for this test" % count)
1614 repo = repo.unfiltered()
1618 repo = repo.unfiltered()
1615 nl = [repo.changelog.node(i) for i in _xrange(count)]
1619 nl = [repo.changelog.node(i) for i in _xrange(count)]
1616
1620
1617 def d():
1621 def d():
1618 for n in nl:
1622 for n in nl:
1619 repo.changelog.parents(n)
1623 repo.changelog.parents(n)
1620
1624
1621 timer(d)
1625 timer(d)
1622 fm.end()
1626 fm.end()
1623
1627
1624
1628
1625 @command(b'perfctxfiles', formatteropts)
1629 @command(b'perfctxfiles', formatteropts)
1626 def perfctxfiles(ui, repo, x, **opts):
1630 def perfctxfiles(ui, repo, x, **opts):
1627 opts = _byteskwargs(opts)
1631 opts = _byteskwargs(opts)
1628 x = int(x)
1632 x = int(x)
1629 timer, fm = gettimer(ui, opts)
1633 timer, fm = gettimer(ui, opts)
1630
1634
1631 def d():
1635 def d():
1632 len(repo[x].files())
1636 len(repo[x].files())
1633
1637
1634 timer(d)
1638 timer(d)
1635 fm.end()
1639 fm.end()
1636
1640
1637
1641
1638 @command(b'perfrawfiles', formatteropts)
1642 @command(b'perfrawfiles', formatteropts)
1639 def perfrawfiles(ui, repo, x, **opts):
1643 def perfrawfiles(ui, repo, x, **opts):
1640 opts = _byteskwargs(opts)
1644 opts = _byteskwargs(opts)
1641 x = int(x)
1645 x = int(x)
1642 timer, fm = gettimer(ui, opts)
1646 timer, fm = gettimer(ui, opts)
1643 cl = repo.changelog
1647 cl = repo.changelog
1644
1648
1645 def d():
1649 def d():
1646 len(cl.read(x)[3])
1650 len(cl.read(x)[3])
1647
1651
1648 timer(d)
1652 timer(d)
1649 fm.end()
1653 fm.end()
1650
1654
1651
1655
1652 @command(b'perflookup', formatteropts)
1656 @command(b'perflookup', formatteropts)
1653 def perflookup(ui, repo, rev, **opts):
1657 def perflookup(ui, repo, rev, **opts):
1654 opts = _byteskwargs(opts)
1658 opts = _byteskwargs(opts)
1655 timer, fm = gettimer(ui, opts)
1659 timer, fm = gettimer(ui, opts)
1656 timer(lambda: len(repo.lookup(rev)))
1660 timer(lambda: len(repo.lookup(rev)))
1657 fm.end()
1661 fm.end()
1658
1662
1659
1663
1660 @command(
1664 @command(
1661 b'perflinelogedits',
1665 b'perflinelogedits',
1662 [
1666 [
1663 (b'n', b'edits', 10000, b'number of edits'),
1667 (b'n', b'edits', 10000, b'number of edits'),
1664 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1668 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1665 ],
1669 ],
1666 norepo=True,
1670 norepo=True,
1667 )
1671 )
1668 def perflinelogedits(ui, **opts):
1672 def perflinelogedits(ui, **opts):
1669 from mercurial import linelog
1673 from mercurial import linelog
1670
1674
1671 opts = _byteskwargs(opts)
1675 opts = _byteskwargs(opts)
1672
1676
1673 edits = opts[b'edits']
1677 edits = opts[b'edits']
1674 maxhunklines = opts[b'max_hunk_lines']
1678 maxhunklines = opts[b'max_hunk_lines']
1675
1679
1676 maxb1 = 100000
1680 maxb1 = 100000
1677 random.seed(0)
1681 random.seed(0)
1678 randint = random.randint
1682 randint = random.randint
1679 currentlines = 0
1683 currentlines = 0
1680 arglist = []
1684 arglist = []
1681 for rev in _xrange(edits):
1685 for rev in _xrange(edits):
1682 a1 = randint(0, currentlines)
1686 a1 = randint(0, currentlines)
1683 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1687 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1684 b1 = randint(0, maxb1)
1688 b1 = randint(0, maxb1)
1685 b2 = randint(b1, b1 + maxhunklines)
1689 b2 = randint(b1, b1 + maxhunklines)
1686 currentlines += (b2 - b1) - (a2 - a1)
1690 currentlines += (b2 - b1) - (a2 - a1)
1687 arglist.append((rev, a1, a2, b1, b2))
1691 arglist.append((rev, a1, a2, b1, b2))
1688
1692
1689 def d():
1693 def d():
1690 ll = linelog.linelog()
1694 ll = linelog.linelog()
1691 for args in arglist:
1695 for args in arglist:
1692 ll.replacelines(*args)
1696 ll.replacelines(*args)
1693
1697
1694 timer, fm = gettimer(ui, opts)
1698 timer, fm = gettimer(ui, opts)
1695 timer(d)
1699 timer(d)
1696 fm.end()
1700 fm.end()
1697
1701
1698
1702
1699 @command(b'perfrevrange', formatteropts)
1703 @command(b'perfrevrange', formatteropts)
1700 def perfrevrange(ui, repo, *specs, **opts):
1704 def perfrevrange(ui, repo, *specs, **opts):
1701 opts = _byteskwargs(opts)
1705 opts = _byteskwargs(opts)
1702 timer, fm = gettimer(ui, opts)
1706 timer, fm = gettimer(ui, opts)
1703 revrange = scmutil.revrange
1707 revrange = scmutil.revrange
1704 timer(lambda: len(revrange(repo, specs)))
1708 timer(lambda: len(revrange(repo, specs)))
1705 fm.end()
1709 fm.end()
1706
1710
1707
1711
1708 @command(b'perfnodelookup', formatteropts)
1712 @command(b'perfnodelookup', formatteropts)
1709 def perfnodelookup(ui, repo, rev, **opts):
1713 def perfnodelookup(ui, repo, rev, **opts):
1710 opts = _byteskwargs(opts)
1714 opts = _byteskwargs(opts)
1711 timer, fm = gettimer(ui, opts)
1715 timer, fm = gettimer(ui, opts)
1712 import mercurial.revlog
1716 import mercurial.revlog
1713
1717
1714 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1718 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1715 n = scmutil.revsingle(repo, rev).node()
1719 n = scmutil.revsingle(repo, rev).node()
1716 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1720 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1717
1721
1718 def d():
1722 def d():
1719 cl.rev(n)
1723 cl.rev(n)
1720 clearcaches(cl)
1724 clearcaches(cl)
1721
1725
1722 timer(d)
1726 timer(d)
1723 fm.end()
1727 fm.end()
1724
1728
1725
1729
1726 @command(
1730 @command(
1727 b'perflog',
1731 b'perflog',
1728 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1732 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1729 )
1733 )
1730 def perflog(ui, repo, rev=None, **opts):
1734 def perflog(ui, repo, rev=None, **opts):
1731 opts = _byteskwargs(opts)
1735 opts = _byteskwargs(opts)
1732 if rev is None:
1736 if rev is None:
1733 rev = []
1737 rev = []
1734 timer, fm = gettimer(ui, opts)
1738 timer, fm = gettimer(ui, opts)
1735 ui.pushbuffer()
1739 ui.pushbuffer()
1736 timer(
1740 timer(
1737 lambda: commands.log(
1741 lambda: commands.log(
1738 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1742 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1739 )
1743 )
1740 )
1744 )
1741 ui.popbuffer()
1745 ui.popbuffer()
1742 fm.end()
1746 fm.end()
1743
1747
1744
1748
1745 @command(b'perfmoonwalk', formatteropts)
1749 @command(b'perfmoonwalk', formatteropts)
1746 def perfmoonwalk(ui, repo, **opts):
1750 def perfmoonwalk(ui, repo, **opts):
1747 """benchmark walking the changelog backwards
1751 """benchmark walking the changelog backwards
1748
1752
1749 This also loads the changelog data for each revision in the changelog.
1753 This also loads the changelog data for each revision in the changelog.
1750 """
1754 """
1751 opts = _byteskwargs(opts)
1755 opts = _byteskwargs(opts)
1752 timer, fm = gettimer(ui, opts)
1756 timer, fm = gettimer(ui, opts)
1753
1757
1754 def moonwalk():
1758 def moonwalk():
1755 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1759 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1756 ctx = repo[i]
1760 ctx = repo[i]
1757 ctx.branch() # read changelog data (in addition to the index)
1761 ctx.branch() # read changelog data (in addition to the index)
1758
1762
1759 timer(moonwalk)
1763 timer(moonwalk)
1760 fm.end()
1764 fm.end()
1761
1765
1762
1766
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render through a ui whose output goes to /dev/null so that the cost
    # of actually displaying the result does not pollute the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        # render (and flush) every requested revision with the template
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    try:
        timer(format)
        fm.end()
    finally:
        # BUG FIX: the devnull stream used to be leaked; always close it
        nullui.fout.close()
1802
1806
1803
1807
def _displaystats(ui, opts, entries, data):
    """display, through a formatter, statistics about gathered values

    `entries` is a list of `(key, title)` pairs. For each pair, `data[key]`
    must be a list of tuples whose first item is the numeric value being
    summarized (the remaining items identify what was measured).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # BUG FIX: percentile indices must be derived from the number of
        # collected values; the previous code used len(data) — the number
        # of keys in the dict — which yielded wrong percentiles and could
        # even index out of range.
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1849
1853
1850
1854
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the columns that only get filled when timing data exist
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant: they are the ones that need a base
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # BUG FIX: read the clock *before* computing the elapsed
                # time; the previous code computed `end - begin` with the
                # stale `end` left over from the p1 measurement.
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2032
2036
2033
2037
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits are relevant: they have a meaningful common base
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # _displaystats builds its own formatter; the previous version
        # created an extra, unused `perf` formatter here (dead store,
        # also inconsistent with perfhelper-mergecopies)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2159
2163
2160
2164
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # time the construction of a case collision auditor over the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def buildauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(buildauditor)
    fm.end()
2167
2171
2168
2172
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # time a full (re)load of the fncache content from disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fncache = repo.store.fncache

    timer(lambda: fncache._load())
    fm.end()
2180
2184
2181
2185
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # benchmark writing the fncache file back to disk, inside a real
    # transaction (the fncache is backed up first so nothing is lost)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force a rewrite even though nothing changed
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        # ROBUSTNESS FIX: the repository lock used to leak if the timed
        # run (or the transaction setup) raised; always release it.
        lock.release()
    fm.end()
2200
2204
2201
2205
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # time running the store path encoding over every fncache entry
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    encode = store.encode

    def encodeall():
        for path in store.fncache.entries:
            encode(path)

    timer(encodeall)
    fm.end()
2215
2219
2216
2220
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker thread body: diff text pairs pulled from queue `q`

    A ``None`` item marks the end of one benchmark round; the worker then
    parks on `ready` until the next round is started (or `done` is set).
    """
    # the flags never change during a run, so pick the diff flavor once
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        while pair is not None:  # drain pairs until the round sentinel
            diff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2232
2236
2233
2237
def _manifestrevision(repo, mnode):
    """return the full text of the manifest revision `mnode`"""
    manifestlog = repo.manifestlog

    # modern Mercurial exposes getstorage(); older versions only have the
    # private _revlog attribute
    if not util.safehasattr(manifestlog, b'getstorage'):
        return manifestlog._revlog.revision(mnode)
    return manifestlog.getstorage(b'').revision(mnode)
2243
2247
2244
2248
2245 @command(
2249 @command(
2246 b'perfbdiff',
2250 b'perfbdiff',
2247 revlogopts
2251 revlogopts
2248 + formatteropts
2252 + formatteropts
2249 + [
2253 + [
2250 (
2254 (
2251 b'',
2255 b'',
2252 b'count',
2256 b'count',
2253 1,
2257 1,
2254 b'number of revisions to test (when using --startrev)',
2258 b'number of revisions to test (when using --startrev)',
2255 ),
2259 ),
2256 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2260 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2257 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2261 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2258 (b'', b'blocks', False, b'test computing diffs into blocks'),
2262 (b'', b'blocks', False, b'test computing diffs into blocks'),
2259 (b'', b'xdiff', False, b'use xdiff algorithm'),
2263 (b'', b'xdiff', False, b'use xdiff algorithm'),
2260 ],
2264 ],
2261 b'-c|-m|FILE REV',
2265 b'-c|-m|FILE REV',
2262 )
2266 )
2263 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2267 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2264 """benchmark a bdiff between revisions
2268 """benchmark a bdiff between revisions
2265
2269
2266 By default, benchmark a bdiff between its delta parent and itself.
2270 By default, benchmark a bdiff between its delta parent and itself.
2267
2271
2268 With ``--count``, benchmark bdiffs between delta parents and self for N
2272 With ``--count``, benchmark bdiffs between delta parents and self for N
2269 revisions starting at the specified revision.
2273 revisions starting at the specified revision.
2270
2274
2271 With ``--alldata``, assume the requested revision is a changeset and
2275 With ``--alldata``, assume the requested revision is a changeset and
2272 measure bdiffs for all changes related to that changeset (manifest
2276 measure bdiffs for all changes related to that changeset (manifest
2273 and filelogs).
2277 and filelogs).
2274 """
2278 """
2275 opts = _byteskwargs(opts)
2279 opts = _byteskwargs(opts)
2276
2280
2277 if opts[b'xdiff'] and not opts[b'blocks']:
2281 if opts[b'xdiff'] and not opts[b'blocks']:
2278 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2282 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2279
2283
2280 if opts[b'alldata']:
2284 if opts[b'alldata']:
2281 opts[b'changelog'] = True
2285 opts[b'changelog'] = True
2282
2286
2283 if opts.get(b'changelog') or opts.get(b'manifest'):
2287 if opts.get(b'changelog') or opts.get(b'manifest'):
2284 file_, rev = None, file_
2288 file_, rev = None, file_
2285 elif rev is None:
2289 elif rev is None:
2286 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2290 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2287
2291
2288 blocks = opts[b'blocks']
2292 blocks = opts[b'blocks']
2289 xdiff = opts[b'xdiff']
2293 xdiff = opts[b'xdiff']
2290 textpairs = []
2294 textpairs = []
2291
2295
2292 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2296 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2293
2297
2294 startrev = r.rev(r.lookup(rev))
2298 startrev = r.rev(r.lookup(rev))
2295 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2299 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2296 if opts[b'alldata']:
2300 if opts[b'alldata']:
2297 # Load revisions associated with changeset.
2301 # Load revisions associated with changeset.
2298 ctx = repo[rev]
2302 ctx = repo[rev]
2299 mtext = _manifestrevision(repo, ctx.manifestnode())
2303 mtext = _manifestrevision(repo, ctx.manifestnode())
2300 for pctx in ctx.parents():
2304 for pctx in ctx.parents():
2301 pman = _manifestrevision(repo, pctx.manifestnode())
2305 pman = _manifestrevision(repo, pctx.manifestnode())
2302 textpairs.append((pman, mtext))
2306 textpairs.append((pman, mtext))
2303
2307
2304 # Load filelog revisions by iterating manifest delta.
2308 # Load filelog revisions by iterating manifest delta.
2305 man = ctx.manifest()
2309 man = ctx.manifest()
2306 pman = ctx.p1().manifest()
2310 pman = ctx.p1().manifest()
2307 for filename, change in pman.diff(man).items():
2311 for filename, change in pman.diff(man).items():
2308 fctx = repo.file(filename)
2312 fctx = repo.file(filename)
2309 f1 = fctx.revision(change[0][0] or -1)
2313 f1 = fctx.revision(change[0][0] or -1)
2310 f2 = fctx.revision(change[1][0] or -1)
2314 f2 = fctx.revision(change[1][0] or -1)
2311 textpairs.append((f1, f2))
2315 textpairs.append((f1, f2))
2312 else:
2316 else:
2313 dp = r.deltaparent(rev)
2317 dp = r.deltaparent(rev)
2314 textpairs.append((r.revision(dp), r.revision(rev)))
2318 textpairs.append((r.revision(dp), r.revision(rev)))
2315
2319
2316 withthreads = threads > 0
2320 withthreads = threads > 0
2317 if not withthreads:
2321 if not withthreads:
2318
2322
2319 def d():
2323 def d():
2320 for pair in textpairs:
2324 for pair in textpairs:
2321 if xdiff:
2325 if xdiff:
2322 mdiff.bdiff.xdiffblocks(*pair)
2326 mdiff.bdiff.xdiffblocks(*pair)
2323 elif blocks:
2327 elif blocks:
2324 mdiff.bdiff.blocks(*pair)
2328 mdiff.bdiff.blocks(*pair)
2325 else:
2329 else:
2326 mdiff.textdiff(*pair)
2330 mdiff.textdiff(*pair)
2327
2331
2328 else:
2332 else:
2329 q = queue()
2333 q = queue()
2330 for i in _xrange(threads):
2334 for i in _xrange(threads):
2331 q.put(None)
2335 q.put(None)
2332 ready = threading.Condition()
2336 ready = threading.Condition()
2333 done = threading.Event()
2337 done = threading.Event()
2334 for i in _xrange(threads):
2338 for i in _xrange(threads):
2335 threading.Thread(
2339 threading.Thread(
2336 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2340 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2337 ).start()
2341 ).start()
2338 q.join()
2342 q.join()
2339
2343
2340 def d():
2344 def d():
2341 for pair in textpairs:
2345 for pair in textpairs:
2342 q.put(pair)
2346 q.put(pair)
2343 for i in _xrange(threads):
2347 for i in _xrange(threads):
2344 q.put(None)
2348 q.put(None)
2345 with ready:
2349 with ready:
2346 ready.notify_all()
2350 ready.notify_all()
2347 q.join()
2351 q.join()
2348
2352
2349 timer, fm = gettimer(ui, opts)
2353 timer, fm = gettimer(ui, opts)
2350 timer(d)
2354 timer(d)
2351 fm.end()
2355 fm.end()
2352
2356
2353 if withthreads:
2357 if withthreads:
2354 done.set()
2358 done.set()
2355 for i in _xrange(threads):
2359 for i in _xrange(threads):
2356 q.put(None)
2360 q.put(None)
2357 with ready:
2361 with ready:
2358 ready.notify_all()
2362 ready.notify_all()
2359
2363
2360
2364
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies operating on the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect the (old text, new text) pairs to diff during the benchmark
    startrev = r.rev(r.lookup(rev))
    lastrev = min(startrev + count, len(r) - 1)
    for rev in range(startrev, lastrev):
        if not opts[b'alldata']:
            # pair each revision with its delta parent
            base = r.deltaparent(rev)
            pairs.append((r.revision(base), r.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            pairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            f1 = fctx.revision(change[0][0] or -1)
            f2 = fctx.revision(change[1][0] or -1)
            pairs.append((f1, f2))

    def bench():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(bench)
    fm.end()
2439
2443
2440
2444
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the matching diff option name
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark a plain diff plus every interesting whitespace-option combo
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {options[c]: b'1' for c in diffopt}

        def run():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(run, title=title)
    fm.end()
2464
2468
2465
2469
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the revlog version lives in the low 16 bits of the first big-endian
    # 32-bit word; bit 16 is the inline-data flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort(b'unsupported revlog version: %d' % version)
    revlogio = revlog.revlogio()
    inline = header & (1 << 16)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog length
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for _pass in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for _pass in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2586
2590
2587
2591
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative start revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def bench():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        step = opts[b'dist']

        if reverse:
            # walk from tip-wards back towards the start revision
            beginrev, endrev = endrev - 1, beginrev - 1
            step = -1 * step

        for cur in _xrange(beginrev, endrev, step):
            # Old revisions don't support passing int.
            node = rl.node(cur)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(bench)
    fm.end()
2636
2640
2637
2641
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: [(rev, [timing-of-pass-1, timing-of-pass-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the median row previously used `resultcount * 70 // 100`,
        # reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2779
2783
2780
2784
2781 class _faketr(object):
2785 class _faketr(object):
2782 def add(s, x, y, z=None):
2786 def add(s, x, y, z=None):
2783 return None
2787 return None
2784
2788
2785
2789
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of `orig` into a temporary revlog, timing each add.

    Returns a list of ``(rev, timing)`` tuples, one per replayed revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as res:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, res[0]))
        updateprogress(total)
        completeprogress()
    return timings
2835
2839
2836
2840
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair for replaying `rev` via addrawrevision.

    `source` selects how the revision content is fed: as a full text, or as
    a cached delta against one of several possible base revisions.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2877
2881
2878
2882
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog ``orig`` truncated to ``truncaterev``.

    The index and data files of ``orig`` are copied into a fresh temporary
    directory, both copies are truncated so revisions ``truncaterev`` and
    later are absent, and a new revlog is instantiated over the copy. The
    temporary directory is removed on exit, even on error.

    Inline revlogs (data stored inside the index file) are rejected because
    the truncation arithmetic below assumes separate ``.i``/``.d`` files.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward 'upperboundcomp' only when this Mercurial version's revlog
    # has it; older versions do not accept the keyword
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # index entries are fixed-size (orig._io.size), so the index can be
        # cut at a revision boundary by simple multiplication; the data file
        # is cut at the start offset of the first removed revision
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2929
2933
2930
2934
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default: every available engine that can actually compress for
        # revlogs (some engines raise NotImplementedError here)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open the file that holds revision data: inline revlogs keep data
        # inside the index file, others use a separate data file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, fresh file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # one segment read covering the whole revision span
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        # NOTE: the compress benches below consume chunks[0]; they rely on
        # this bench running first (it precedes them in `benches`).
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            # always restore the revlog's original compressor
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3058
3062
3059
3063
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """Slice the raw segment bytes in ``data`` into per-revision chunks.

        ``chain`` is a sequence of revision groups (one per segment in
        ``data``); returns zero-copy buffers, one per revision.
        """
        # hoist attribute lookups to locals; this code runs in the timed path
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave fixed-size index entries
                    # with the data, so skip (rev + 1) index records
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # Each do* closure benchmarks one phase; unless --cache was given, the
    # revlog caches are cleared first so every run starts cold.
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate result once, outside the timers, so each
    # bench measures only its own phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing is only meaningful when sparse-read is enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3201
3205
3202
3206
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of the volatile revision
    set caches on revset execution. The volatile caches hold filtering and
    obsolescence related data."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # choose the iteration strategy once: changectx objects or bare revs
    iterate = repo.set if contexts else repo.revs

    def run_once():
        if clear:
            # drop the volatile caches so each run recomputes them
            repo.invalidatevolatilesets()
        for _ in iterate(expr):
            pass

    timer(run_once)
    fm.end()
3234
3238
3235
3239
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, name):
        """Build a timed callable recomputing one volatile set from cold."""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)

        return run

    def selected(candidates):
        """Sort the candidate set names, honouring an explicit selection."""
        ordered = sorted(candidates)
        if names:
            ordered = [n for n in ordered if n in names]
        return ordered

    # obsolescence-related sets
    for name in selected(obsolete.cachefuncs):
        timer(makebench(obsolete.getrevs, name), title=name)

    # filtered-view revision sets
    for name in selected(repoview.filtertable):
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3281
3285
3282
3286
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # the branchcache container API changed; probe for the modern
        # per-filter mapping and fall back to the old flat dict
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset is rebuilt too
                view._branchcaches.clear()
            else:
                # drop only this filter's entry; incremental update path
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not still pending, so smaller
        # subsets are ordered (and benchmarked) before their supersets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so the timer measures pure
    # in-memory computation; safeattrsetter records the original values
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched branchmap read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3372
3376
3373
3377
3374 @command(
3378 @command(
3375 b'perfbranchmapupdate',
3379 b'perfbranchmapupdate',
3376 [
3380 [
3377 (b'', b'base', [], b'subset of revision to start from'),
3381 (b'', b'base', [], b'subset of revision to start from'),
3378 (b'', b'target', [], b'subset of revision to end with'),
3382 (b'', b'target', [], b'subset of revision to end with'),
3379 (b'', b'clear-caches', False, b'clear cache between each runs'),
3383 (b'', b'clear-caches', False, b'clear cache between each runs'),
3380 ]
3384 ]
3381 + formatteropts,
3385 + formatteropts,
3382 )
3386 )
3383 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3387 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3384 """benchmark branchmap update from for <base> revs to <target> revs
3388 """benchmark branchmap update from for <base> revs to <target> revs
3385
3389
3386 If `--clear-caches` is passed, the following items will be reset before
3390 If `--clear-caches` is passed, the following items will be reset before
3387 each update:
3391 each update:
3388 * the changelog instance and associated indexes
3392 * the changelog instance and associated indexes
3389 * the rev-branch-cache instance
3393 * the rev-branch-cache instance
3390
3394
3391 Examples:
3395 Examples:
3392
3396
3393 # update for the one last revision
3397 # update for the one last revision
3394 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3398 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3395
3399
3396 $ update for change coming with a new branch
3400 $ update for change coming with a new branch
3397 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3401 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3398 """
3402 """
3399 from mercurial import branchmap
3403 from mercurial import branchmap
3400 from mercurial import repoview
3404 from mercurial import repoview
3401
3405
3402 opts = _byteskwargs(opts)
3406 opts = _byteskwargs(opts)
3403 timer, fm = gettimer(ui, opts)
3407 timer, fm = gettimer(ui, opts)
3404 clearcaches = opts[b'clear_caches']
3408 clearcaches = opts[b'clear_caches']
3405 unfi = repo.unfiltered()
3409 unfi = repo.unfiltered()
3406 x = [None] # used to pass data between closure
3410 x = [None] # used to pass data between closure
3407
3411
3408 # we use a `list` here to avoid possible side effect from smartset
3412 # we use a `list` here to avoid possible side effect from smartset
3409 baserevs = list(scmutil.revrange(repo, base))
3413 baserevs = list(scmutil.revrange(repo, base))
3410 targetrevs = list(scmutil.revrange(repo, target))
3414 targetrevs = list(scmutil.revrange(repo, target))
3411 if not baserevs:
3415 if not baserevs:
3412 raise error.Abort(b'no revisions selected for --base')
3416 raise error.Abort(b'no revisions selected for --base')
3413 if not targetrevs:
3417 if not targetrevs:
3414 raise error.Abort(b'no revisions selected for --target')
3418 raise error.Abort(b'no revisions selected for --target')
3415
3419
3416 # make sure the target branchmap also contains the one in the base
3420 # make sure the target branchmap also contains the one in the base
3417 targetrevs = list(set(baserevs) | set(targetrevs))
3421 targetrevs = list(set(baserevs) | set(targetrevs))
3418 targetrevs.sort()
3422 targetrevs.sort()
3419
3423
3420 cl = repo.changelog
3424 cl = repo.changelog
3421 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3425 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3422 allbaserevs.sort()
3426 allbaserevs.sort()
3423 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3427 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3424
3428
3425 newrevs = list(alltargetrevs.difference(allbaserevs))
3429 newrevs = list(alltargetrevs.difference(allbaserevs))
3426 newrevs.sort()
3430 newrevs.sort()
3427
3431
3428 allrevs = frozenset(unfi.changelog.revs())
3432 allrevs = frozenset(unfi.changelog.revs())
3429 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3433 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3430 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3434 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3431
3435
3432 def basefilter(repo, visibilityexceptions=None):
3436 def basefilter(repo, visibilityexceptions=None):
3433 return basefilterrevs
3437 return basefilterrevs
3434
3438
3435 def targetfilter(repo, visibilityexceptions=None):
3439 def targetfilter(repo, visibilityexceptions=None):
3436 return targetfilterrevs
3440 return targetfilterrevs
3437
3441
3438 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3442 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3439 ui.status(msg % (len(allbaserevs), len(newrevs)))
3443 ui.status(msg % (len(allbaserevs), len(newrevs)))
3440 if targetfilterrevs:
3444 if targetfilterrevs:
3441 msg = b'(%d revisions still filtered)\n'
3445 msg = b'(%d revisions still filtered)\n'
3442 ui.status(msg % len(targetfilterrevs))
3446 ui.status(msg % len(targetfilterrevs))
3443
3447
3444 try:
3448 try:
3445 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3449 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3446 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3450 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3447
3451
3448 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3452 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3449 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3453 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3450
3454
3451 # try to find an existing branchmap to reuse
3455 # try to find an existing branchmap to reuse
3452 subsettable = getbranchmapsubsettable()
3456 subsettable = getbranchmapsubsettable()
3453 candidatefilter = subsettable.get(None)
3457 candidatefilter = subsettable.get(None)
3454 while candidatefilter is not None:
3458 while candidatefilter is not None:
3455 candidatebm = repo.filtered(candidatefilter).branchmap()
3459 candidatebm = repo.filtered(candidatefilter).branchmap()
3456 if candidatebm.validfor(baserepo):
3460 if candidatebm.validfor(baserepo):
3457 filtered = repoview.filterrevs(repo, candidatefilter)
3461 filtered = repoview.filterrevs(repo, candidatefilter)
3458 missing = [r for r in allbaserevs if r in filtered]
3462 missing = [r for r in allbaserevs if r in filtered]
3459 base = candidatebm.copy()
3463 base = candidatebm.copy()
3460 base.update(baserepo, missing)
3464 base.update(baserepo, missing)
3461 break
3465 break
3462 candidatefilter = subsettable.get(candidatefilter)
3466 candidatefilter = subsettable.get(candidatefilter)
3463 else:
3467 else:
3464 # no suitable subset where found
3468 # no suitable subset where found
3465 base = branchmap.branchcache()
3469 base = branchmap.branchcache()
3466 base.update(baserepo, allbaserevs)
3470 base.update(baserepo, allbaserevs)
3467
3471
3468 def setup():
3472 def setup():
3469 x[0] = base.copy()
3473 x[0] = base.copy()
3470 if clearcaches:
3474 if clearcaches:
3471 unfi._revbranchcache = None
3475 unfi._revbranchcache = None
3472 clearchangelog(repo)
3476 clearchangelog(repo)
3473
3477
3474 def bench():
3478 def bench():
3475 x[0].update(targetrepo, newrevs)
3479 x[0].update(targetrepo, newrevs)
3476
3480
3477 timer(bench, setup=setup)
3481 timer(bench, setup=setup)
3478 fm.end()
3482 fm.end()
3479 finally:
3483 finally:
3480 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3484 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3481 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3485 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3482
3486
3483
3487
3484 @command(
3488 @command(
3485 b'perfbranchmapload',
3489 b'perfbranchmapload',
3486 [
3490 [
3487 (b'f', b'filter', b'', b'Specify repoview filter'),
3491 (b'f', b'filter', b'', b'Specify repoview filter'),
3488 (b'', b'list', False, b'List brachmap filter caches'),
3492 (b'', b'list', False, b'List brachmap filter caches'),
3489 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3493 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3490 ]
3494 ]
3491 + formatteropts,
3495 + formatteropts,
3492 )
3496 )
3493 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3497 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3494 """benchmark reading the branchmap"""
3498 """benchmark reading the branchmap"""
3495 opts = _byteskwargs(opts)
3499 opts = _byteskwargs(opts)
3496 clearrevlogs = opts[b'clear_revlogs']
3500 clearrevlogs = opts[b'clear_revlogs']
3497
3501
3498 if list:
3502 if list:
3499 for name, kind, st in repo.cachevfs.readdir(stat=True):
3503 for name, kind, st in repo.cachevfs.readdir(stat=True):
3500 if name.startswith(b'branch2'):
3504 if name.startswith(b'branch2'):
3501 filtername = name.partition(b'-')[2] or b'unfiltered'
3505 filtername = name.partition(b'-')[2] or b'unfiltered'
3502 ui.status(
3506 ui.status(
3503 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3507 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3504 )
3508 )
3505 return
3509 return
3506 if not filter:
3510 if not filter:
3507 filter = None
3511 filter = None
3508 subsettable = getbranchmapsubsettable()
3512 subsettable = getbranchmapsubsettable()
3509 if filter is None:
3513 if filter is None:
3510 repo = repo.unfiltered()
3514 repo = repo.unfiltered()
3511 else:
3515 else:
3512 repo = repoview.repoview(repo, filter)
3516 repo = repoview.repoview(repo, filter)
3513
3517
3514 repo.branchmap() # make sure we have a relevant, up to date branchmap
3518 repo.branchmap() # make sure we have a relevant, up to date branchmap
3515
3519
3516 try:
3520 try:
3517 fromfile = branchmap.branchcache.fromfile
3521 fromfile = branchmap.branchcache.fromfile
3518 except AttributeError:
3522 except AttributeError:
3519 # older versions
3523 # older versions
3520 fromfile = branchmap.read
3524 fromfile = branchmap.read
3521
3525
3522 currentfilter = filter
3526 currentfilter = filter
3523 # try once without timer, the filter may not be cached
3527 # try once without timer, the filter may not be cached
3524 while fromfile(repo) is None:
3528 while fromfile(repo) is None:
3525 currentfilter = subsettable.get(currentfilter)
3529 currentfilter = subsettable.get(currentfilter)
3526 if currentfilter is None:
3530 if currentfilter is None:
3527 raise error.Abort(
3531 raise error.Abort(
3528 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3532 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3529 )
3533 )
3530 repo = repo.filtered(currentfilter)
3534 repo = repo.filtered(currentfilter)
3531 timer, fm = gettimer(ui, opts)
3535 timer, fm = gettimer(ui, opts)
3532
3536
3533 def setup():
3537 def setup():
3534 if clearrevlogs:
3538 if clearrevlogs:
3535 clearchangelog(repo)
3539 clearchangelog(repo)
3536
3540
3537 def bench():
3541 def bench():
3538 fromfile(repo)
3542 fromfile(repo)
3539
3543
3540 timer(bench, setup=setup)
3544 timer(bench, setup=setup)
3541 fm.end()
3545 fm.end()
3542
3546
3543
3547
3544 @command(b'perfloadmarkers')
3548 @command(b'perfloadmarkers')
3545 def perfloadmarkers(ui, repo):
3549 def perfloadmarkers(ui, repo):
3546 """benchmark the time to parse the on-disk markers for a repo
3550 """benchmark the time to parse the on-disk markers for a repo
3547
3551
3548 Result is the number of markers in the repo."""
3552 Result is the number of markers in the repo."""
3549 timer, fm = gettimer(ui)
3553 timer, fm = gettimer(ui)
3550 svfs = getsvfs(repo)
3554 svfs = getsvfs(repo)
3551 timer(lambda: len(obsolete.obsstore(svfs)))
3555 timer(lambda: len(obsolete.obsstore(svfs)))
3552 fm.end()
3556 fm.end()
3553
3557
3554
3558
3555 @command(
3559 @command(
3556 b'perflrucachedict',
3560 b'perflrucachedict',
3557 formatteropts
3561 formatteropts
3558 + [
3562 + [
3559 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3563 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3560 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3564 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3561 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3565 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3562 (b'', b'size', 4, b'size of cache'),
3566 (b'', b'size', 4, b'size of cache'),
3563 (b'', b'gets', 10000, b'number of key lookups'),
3567 (b'', b'gets', 10000, b'number of key lookups'),
3564 (b'', b'sets', 10000, b'number of key sets'),
3568 (b'', b'sets', 10000, b'number of key sets'),
3565 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3569 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3566 (
3570 (
3567 b'',
3571 b'',
3568 b'mixedgetfreq',
3572 b'mixedgetfreq',
3569 50,
3573 50,
3570 b'frequency of get vs set ops in mixed mode',
3574 b'frequency of get vs set ops in mixed mode',
3571 ),
3575 ),
3572 ],
3576 ],
3573 norepo=True,
3577 norepo=True,
3574 )
3578 )
3575 def perflrucache(
3579 def perflrucache(
3576 ui,
3580 ui,
3577 mincost=0,
3581 mincost=0,
3578 maxcost=100,
3582 maxcost=100,
3579 costlimit=0,
3583 costlimit=0,
3580 size=4,
3584 size=4,
3581 gets=10000,
3585 gets=10000,
3582 sets=10000,
3586 sets=10000,
3583 mixed=10000,
3587 mixed=10000,
3584 mixedgetfreq=50,
3588 mixedgetfreq=50,
3585 **opts
3589 **opts
3586 ):
3590 ):
3587 opts = _byteskwargs(opts)
3591 opts = _byteskwargs(opts)
3588
3592
3589 def doinit():
3593 def doinit():
3590 for i in _xrange(10000):
3594 for i in _xrange(10000):
3591 util.lrucachedict(size)
3595 util.lrucachedict(size)
3592
3596
3593 costrange = list(range(mincost, maxcost + 1))
3597 costrange = list(range(mincost, maxcost + 1))
3594
3598
3595 values = []
3599 values = []
3596 for i in _xrange(size):
3600 for i in _xrange(size):
3597 values.append(random.randint(0, _maxint))
3601 values.append(random.randint(0, _maxint))
3598
3602
3599 # Get mode fills the cache and tests raw lookup performance with no
3603 # Get mode fills the cache and tests raw lookup performance with no
3600 # eviction.
3604 # eviction.
3601 getseq = []
3605 getseq = []
3602 for i in _xrange(gets):
3606 for i in _xrange(gets):
3603 getseq.append(random.choice(values))
3607 getseq.append(random.choice(values))
3604
3608
3605 def dogets():
3609 def dogets():
3606 d = util.lrucachedict(size)
3610 d = util.lrucachedict(size)
3607 for v in values:
3611 for v in values:
3608 d[v] = v
3612 d[v] = v
3609 for key in getseq:
3613 for key in getseq:
3610 value = d[key]
3614 value = d[key]
3611 value # silence pyflakes warning
3615 value # silence pyflakes warning
3612
3616
3613 def dogetscost():
3617 def dogetscost():
3614 d = util.lrucachedict(size, maxcost=costlimit)
3618 d = util.lrucachedict(size, maxcost=costlimit)
3615 for i, v in enumerate(values):
3619 for i, v in enumerate(values):
3616 d.insert(v, v, cost=costs[i])
3620 d.insert(v, v, cost=costs[i])
3617 for key in getseq:
3621 for key in getseq:
3618 try:
3622 try:
3619 value = d[key]
3623 value = d[key]
3620 value # silence pyflakes warning
3624 value # silence pyflakes warning
3621 except KeyError:
3625 except KeyError:
3622 pass
3626 pass
3623
3627
3624 # Set mode tests insertion speed with cache eviction.
3628 # Set mode tests insertion speed with cache eviction.
3625 setseq = []
3629 setseq = []
3626 costs = []
3630 costs = []
3627 for i in _xrange(sets):
3631 for i in _xrange(sets):
3628 setseq.append(random.randint(0, _maxint))
3632 setseq.append(random.randint(0, _maxint))
3629 costs.append(random.choice(costrange))
3633 costs.append(random.choice(costrange))
3630
3634
3631 def doinserts():
3635 def doinserts():
3632 d = util.lrucachedict(size)
3636 d = util.lrucachedict(size)
3633 for v in setseq:
3637 for v in setseq:
3634 d.insert(v, v)
3638 d.insert(v, v)
3635
3639
3636 def doinsertscost():
3640 def doinsertscost():
3637 d = util.lrucachedict(size, maxcost=costlimit)
3641 d = util.lrucachedict(size, maxcost=costlimit)
3638 for i, v in enumerate(setseq):
3642 for i, v in enumerate(setseq):
3639 d.insert(v, v, cost=costs[i])
3643 d.insert(v, v, cost=costs[i])
3640
3644
3641 def dosets():
3645 def dosets():
3642 d = util.lrucachedict(size)
3646 d = util.lrucachedict(size)
3643 for v in setseq:
3647 for v in setseq:
3644 d[v] = v
3648 d[v] = v
3645
3649
3646 # Mixed mode randomly performs gets and sets with eviction.
3650 # Mixed mode randomly performs gets and sets with eviction.
3647 mixedops = []
3651 mixedops = []
3648 for i in _xrange(mixed):
3652 for i in _xrange(mixed):
3649 r = random.randint(0, 100)
3653 r = random.randint(0, 100)
3650 if r < mixedgetfreq:
3654 if r < mixedgetfreq:
3651 op = 0
3655 op = 0
3652 else:
3656 else:
3653 op = 1
3657 op = 1
3654
3658
3655 mixedops.append(
3659 mixedops.append(
3656 (op, random.randint(0, size * 2), random.choice(costrange))
3660 (op, random.randint(0, size * 2), random.choice(costrange))
3657 )
3661 )
3658
3662
3659 def domixed():
3663 def domixed():
3660 d = util.lrucachedict(size)
3664 d = util.lrucachedict(size)
3661
3665
3662 for op, v, cost in mixedops:
3666 for op, v, cost in mixedops:
3663 if op == 0:
3667 if op == 0:
3664 try:
3668 try:
3665 d[v]
3669 d[v]
3666 except KeyError:
3670 except KeyError:
3667 pass
3671 pass
3668 else:
3672 else:
3669 d[v] = v
3673 d[v] = v
3670
3674
3671 def domixedcost():
3675 def domixedcost():
3672 d = util.lrucachedict(size, maxcost=costlimit)
3676 d = util.lrucachedict(size, maxcost=costlimit)
3673
3677
3674 for op, v, cost in mixedops:
3678 for op, v, cost in mixedops:
3675 if op == 0:
3679 if op == 0:
3676 try:
3680 try:
3677 d[v]
3681 d[v]
3678 except KeyError:
3682 except KeyError:
3679 pass
3683 pass
3680 else:
3684 else:
3681 d.insert(v, v, cost=cost)
3685 d.insert(v, v, cost=cost)
3682
3686
3683 benches = [
3687 benches = [
3684 (doinit, b'init'),
3688 (doinit, b'init'),
3685 ]
3689 ]
3686
3690
3687 if costlimit:
3691 if costlimit:
3688 benches.extend(
3692 benches.extend(
3689 [
3693 [
3690 (dogetscost, b'gets w/ cost limit'),
3694 (dogetscost, b'gets w/ cost limit'),
3691 (doinsertscost, b'inserts w/ cost limit'),
3695 (doinsertscost, b'inserts w/ cost limit'),
3692 (domixedcost, b'mixed w/ cost limit'),
3696 (domixedcost, b'mixed w/ cost limit'),
3693 ]
3697 ]
3694 )
3698 )
3695 else:
3699 else:
3696 benches.extend(
3700 benches.extend(
3697 [
3701 [
3698 (dogets, b'gets'),
3702 (dogets, b'gets'),
3699 (doinserts, b'inserts'),
3703 (doinserts, b'inserts'),
3700 (dosets, b'sets'),
3704 (dosets, b'sets'),
3701 (domixed, b'mixed'),
3705 (domixed, b'mixed'),
3702 ]
3706 ]
3703 )
3707 )
3704
3708
3705 for fn, title in benches:
3709 for fn, title in benches:
3706 timer, fm = gettimer(ui, opts)
3710 timer, fm = gettimer(ui, opts)
3707 timer(fn, title=title)
3711 timer(fn, title=title)
3708 fm.end()
3712 fm.end()
3709
3713
3710
3714
3711 @command(b'perfwrite', formatteropts)
3715 @command(b'perfwrite', formatteropts)
3712 def perfwrite(ui, repo, **opts):
3716 def perfwrite(ui, repo, **opts):
3713 """microbenchmark ui.write
3717 """microbenchmark ui.write
3714 """
3718 """
3715 opts = _byteskwargs(opts)
3719 opts = _byteskwargs(opts)
3716
3720
3717 timer, fm = gettimer(ui, opts)
3721 timer, fm = gettimer(ui, opts)
3718
3722
3719 def write():
3723 def write():
3720 for i in range(100000):
3724 for i in range(100000):
3721 ui.writenoi18n(b'Testing write performance\n')
3725 ui.writenoi18n(b'Testing write performance\n')
3722
3726
3723 timer(write)
3727 timer(write)
3724 fm.end()
3728 fm.end()
3725
3729
3726
3730
3727 def uisetup(ui):
3731 def uisetup(ui):
3728 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3732 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3729 commands, b'debugrevlogopts'
3733 commands, b'debugrevlogopts'
3730 ):
3734 ):
3731 # for "historical portability":
3735 # for "historical portability":
3732 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3736 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3733 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3737 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3734 # openrevlog() should cause failure, because it has been
3738 # openrevlog() should cause failure, because it has been
3735 # available since 3.5 (or 49c583ca48c4).
3739 # available since 3.5 (or 49c583ca48c4).
3736 def openrevlog(orig, repo, cmd, file_, opts):
3740 def openrevlog(orig, repo, cmd, file_, opts):
3737 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3741 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3738 raise error.Abort(
3742 raise error.Abort(
3739 b"This version doesn't support --dir option",
3743 b"This version doesn't support --dir option",
3740 hint=b"use 3.5 or later",
3744 hint=b"use 3.5 or later",
3741 )
3745 )
3742 return orig(repo, cmd, file_, opts)
3746 return orig(repo, cmd, file_, opts)
3743
3747
3744 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3748 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3745
3749
3746
3750
3747 @command(
3751 @command(
3748 b'perfprogress',
3752 b'perfprogress',
3749 formatteropts
3753 formatteropts
3750 + [
3754 + [
3751 (b'', b'topic', b'topic', b'topic for progress messages'),
3755 (b'', b'topic', b'topic', b'topic for progress messages'),
3752 (b'c', b'total', 1000000, b'total value we are progressing to'),
3756 (b'c', b'total', 1000000, b'total value we are progressing to'),
3753 ],
3757 ],
3754 norepo=True,
3758 norepo=True,
3755 )
3759 )
3756 def perfprogress(ui, topic=None, total=None, **opts):
3760 def perfprogress(ui, topic=None, total=None, **opts):
3757 """printing of progress bars"""
3761 """printing of progress bars"""
3758 opts = _byteskwargs(opts)
3762 opts = _byteskwargs(opts)
3759
3763
3760 timer, fm = gettimer(ui, opts)
3764 timer, fm = gettimer(ui, opts)
3761
3765
3762 def doprogress():
3766 def doprogress():
3763 with ui.makeprogress(topic, total=total) as progress:
3767 with ui.makeprogress(topic, total=total) as progress:
3764 for i in _xrange(total):
3768 for i in _xrange(total):
3765 progress.increment()
3769 progress.increment()
3766
3770
3767 timer(doprogress)
3771 timer(doprogress)
3768 fm.end()
3772 fm.end()
@@ -1,396 +1,396
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from for <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 (no help text available)
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
111 perfdirstatedirs
111 perfdirstatedirs
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 benchmap a 'dirstate._map.filefoldmap.get()' request
114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 perfdirstatewrite
115 perfdirstatewrite
116 (no help text available)
116 (no help text available)
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
175 perfstatus benchmark the performance of a single status call
175 perfstatus benchmark the performance of a single status call
176 perftags (no help text available)
176 perftags (no help text available)
177 perftemplating
177 perftemplating
178 test the rendering time of a given template
178 test the rendering time of a given template
179 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
180 perfvolatilesets
180 perfvolatilesets
181 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
182 perfwalk (no help text available)
182 perfwalk (no help text available)
183 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
184
184
185 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
186 $ hg perfaddremove
186 $ hg perfaddremove
187 $ hg perfancestors
187 $ hg perfancestors
188 $ hg perfancestorset 2
188 $ hg perfancestorset 2
189 $ hg perfannotate a
189 $ hg perfannotate a
190 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
191 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
192 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
193 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
194 $ hg perfbookmarks
194 $ hg perfbookmarks
195 $ hg perfbranchmap
195 $ hg perfbranchmap
196 $ hg perfbranchmapload
196 $ hg perfbranchmapload
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
199 $ hg perfcca
199 $ hg perfcca
200 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
201 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
202 $ hg perfchangeset 2
202 $ hg perfchangeset 2
203 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
204 $ hg perfdiffwd
204 $ hg perfdiffwd
205 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
206 $ hg perfdirs
206 $ hg perfdirs
207 $ hg perfdirstate
207 $ hg perfdirstate
208 $ hg perfdirstatedirs
208 $ hg perfdirstatedirs
209 $ hg perfdirstatefoldmap
209 $ hg perfdirstatefoldmap
210 $ hg perfdirstatewrite
210 $ hg perfdirstatewrite
211 #if repofncache
211 #if repofncache
212 $ hg perffncacheencode
212 $ hg perffncacheencode
213 $ hg perffncacheload
213 $ hg perffncacheload
214 $ hg debugrebuildfncache
214 $ hg debugrebuildfncache
215 fncache already up to date
215 fncache already up to date
216 $ hg perffncachewrite
216 $ hg perffncachewrite
217 $ hg debugrebuildfncache
217 $ hg debugrebuildfncache
218 fncache already up to date
218 fncache already up to date
219 #endif
219 #endif
220 $ hg perfheads
220 $ hg perfheads
221 $ hg perfignore
221 $ hg perfignore
222 $ hg perfindex
222 $ hg perfindex
223 $ hg perflinelogedits -n 1
223 $ hg perflinelogedits -n 1
224 $ hg perfloadmarkers
224 $ hg perfloadmarkers
225 $ hg perflog
225 $ hg perflog
226 $ hg perflookup 2
226 $ hg perflookup 2
227 $ hg perflrucache
227 $ hg perflrucache
228 $ hg perfmanifest 2
228 $ hg perfmanifest 2
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 $ hg perfmanifest -m 44fe2c8352bb
230 $ hg perfmanifest -m 44fe2c8352bb
231 abort: manifest revision must be integer or full node
231 abort: manifest revision must be integer or full node
232 [255]
232 [255]
233 $ hg perfmergecalculate -r 3
233 $ hg perfmergecalculate -r 3
234 $ hg perfmoonwalk
234 $ hg perfmoonwalk
235 $ hg perfnodelookup 2
235 $ hg perfnodelookup 2
236 $ hg perfpathcopies 1 2
236 $ hg perfpathcopies 1 2
237 $ hg perfprogress --total 1000
237 $ hg perfprogress --total 1000
238 $ hg perfrawfiles 2
238 $ hg perfrawfiles 2
239 $ hg perfrevlogindex -c
239 $ hg perfrevlogindex -c
240 #if reporevlogstore
240 #if reporevlogstore
241 $ hg perfrevlogrevisions .hg/store/data/a.i
241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 #endif
242 #endif
243 $ hg perfrevlogrevision -m 0
243 $ hg perfrevlogrevision -m 0
244 $ hg perfrevlogchunks -c
244 $ hg perfrevlogchunks -c
245 $ hg perfrevrange
245 $ hg perfrevrange
246 $ hg perfrevset 'all()'
246 $ hg perfrevset 'all()'
247 $ hg perfstartup
247 $ hg perfstartup
248 $ hg perfstatus
248 $ hg perfstatus
249 $ hg perftags
249 $ hg perftags
250 $ hg perftemplating
250 $ hg perftemplating
251 $ hg perfvolatilesets
251 $ hg perfvolatilesets
252 $ hg perfwalk
252 $ hg perfwalk
253 $ hg perfparents
253 $ hg perfparents
254 $ hg perfdiscovery -q .
254 $ hg perfdiscovery -q .
255
255
256 Test run control
256 Test run control
257 ----------------
257 ----------------
258
258
259 Simple single entry
259 Simple single entry
260
260
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 ! wall * comb * user * sys * (best of 15) (glob)
262 ! wall * comb * user * sys * (best of 15) (glob)
263
263
264 Multiple entries
264 Multiple entries
265
265
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 ! wall * comb * user * sys * (best of 5) (glob)
267 ! wall * comb * user * sys * (best of 5) (glob)
268
268
269 error case are ignored
269 error case are ignored
270
270
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 malformatted run limit entry, missing "-": 500
272 malformatted run limit entry, missing "-": 500
273 ! wall * comb * user * sys * (best of 5) (glob)
273 ! wall * comb * user * sys * (best of 5) (glob)
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 ! wall * comb * user * sys * (best of 5) (glob)
277 ! wall * comb * user * sys * (best of 5) (glob)
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 ! wall * comb * user * sys * (best of 5) (glob)
280 ! wall * comb * user * sys * (best of 5) (glob)
281
281
282 test actual output
282 test actual output
283 ------------------
283 ------------------
284
284
285 normal output:
285 normal output:
286
286
287 $ hg perfheads --config perf.stub=no
287 $ hg perfheads --config perf.stub=no
288 ! wall * comb * user * sys * (best of *) (glob)
288 ! wall * comb * user * sys * (best of *) (glob)
289
289
290 detailed output:
290 detailed output:
291
291
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 ! wall * comb * user * sys * (best of *) (glob)
293 ! wall * comb * user * sys * (best of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
297
297
298 test json output
298 test json output
299 ----------------
299 ----------------
300
300
301 normal output:
301 normal output:
302
302
303 $ hg perfheads --template json --config perf.stub=no
303 $ hg perfheads --template json --config perf.stub=no
304 [
304 [
305 {
305 {
306 "comb": *, (glob)
306 "comb": *, (glob)
307 "count": *, (glob)
307 "count": *, (glob)
308 "sys": *, (glob)
308 "sys": *, (glob)
309 "user": *, (glob)
309 "user": *, (glob)
310 "wall": * (glob)
310 "wall": * (glob)
311 }
311 }
312 ]
312 ]
313
313
314 detailed output:
314 detailed output:
315
315
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 [
317 [
318 {
318 {
319 "avg.comb": *, (glob)
319 "avg.comb": *, (glob)
320 "avg.count": *, (glob)
320 "avg.count": *, (glob)
321 "avg.sys": *, (glob)
321 "avg.sys": *, (glob)
322 "avg.user": *, (glob)
322 "avg.user": *, (glob)
323 "avg.wall": *, (glob)
323 "avg.wall": *, (glob)
324 "comb": *, (glob)
324 "comb": *, (glob)
325 "count": *, (glob)
325 "count": *, (glob)
326 "max.comb": *, (glob)
326 "max.comb": *, (glob)
327 "max.count": *, (glob)
327 "max.count": *, (glob)
328 "max.sys": *, (glob)
328 "max.sys": *, (glob)
329 "max.user": *, (glob)
329 "max.user": *, (glob)
330 "max.wall": *, (glob)
330 "max.wall": *, (glob)
331 "median.comb": *, (glob)
331 "median.comb": *, (glob)
332 "median.count": *, (glob)
332 "median.count": *, (glob)
333 "median.sys": *, (glob)
333 "median.sys": *, (glob)
334 "median.user": *, (glob)
334 "median.user": *, (glob)
335 "median.wall": *, (glob)
335 "median.wall": *, (glob)
336 "sys": *, (glob)
336 "sys": *, (glob)
337 "user": *, (glob)
337 "user": *, (glob)
338 "wall": * (glob)
338 "wall": * (glob)
339 }
339 }
340 ]
340 ]
341
341
342 Test pre-run feature
342 Test pre-run feature
343 --------------------
343 --------------------
344
344
345 (perf discovery has some spurious output)
345 (perf discovery has some spurious output)
346
346
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 ! wall * comb * user * sys * (best of 1) (glob)
348 ! wall * comb * user * sys * (best of 1) (glob)
349 searching for changes
349 searching for changes
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 ! wall * comb * user * sys * (best of 1) (glob)
351 ! wall * comb * user * sys * (best of 1) (glob)
352 searching for changes
352 searching for changes
353 searching for changes
353 searching for changes
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 ! wall * comb * user * sys * (best of 1) (glob)
355 ! wall * comb * user * sys * (best of 1) (glob)
356 searching for changes
356 searching for changes
357 searching for changes
357 searching for changes
358 searching for changes
358 searching for changes
359 searching for changes
359 searching for changes
360
360
361 test profile-benchmark option
361 test profile-benchmark option
362 ------------------------------
362 ------------------------------
363
363
364 Function to check that statprof ran
364 Function to check that statprof ran
365 $ statprofran () {
365 $ statprofran () {
366 > egrep 'Sample count:|No samples recorded' > /dev/null
366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 > }
367 > }
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369
369
370 Check perf.py for historical portability
370 Check perf.py for historical portability
371 ----------------------------------------
371 ----------------------------------------
372
372
373 $ cd "$TESTDIR/.."
373 $ cd "$TESTDIR/.."
374
374
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 contrib/perf.py:\d+: (re)
378 contrib/perf.py:\d+: (re)
379 > from mercurial import (
379 > from mercurial import (
380 import newer module separately in try clause for early Mercurial
380 import newer module separately in try clause for early Mercurial
381 contrib/perf.py:\d+: (re)
381 contrib/perf.py:\d+: (re)
382 > from mercurial import (
382 > from mercurial import (
383 import newer module separately in try clause for early Mercurial
383 import newer module separately in try clause for early Mercurial
384 contrib/perf.py:\d+: (re)
384 contrib/perf.py:\d+: (re)
385 > origindexpath = orig.opener.join(orig.indexfile)
385 > origindexpath = orig.opener.join(orig.indexfile)
386 use getvfs()/getsvfs() for early Mercurial
386 use getvfs()/getsvfs() for early Mercurial
387 contrib/perf.py:\d+: (re)
387 contrib/perf.py:\d+: (re)
388 > origdatapath = orig.opener.join(orig.datafile)
388 > origdatapath = orig.opener.join(orig.datafile)
389 use getvfs()/getsvfs() for early Mercurial
389 use getvfs()/getsvfs() for early Mercurial
390 contrib/perf.py:\d+: (re)
390 contrib/perf.py:\d+: (re)
391 > vfs = vfsmod.vfs(tmpdir)
391 > vfs = vfsmod.vfs(tmpdir)
392 use getvfs()/getsvfs() for early Mercurial
392 use getvfs()/getsvfs() for early Mercurial
393 contrib/perf.py:\d+: (re)
393 contrib/perf.py:\d+: (re)
394 > vfs.options = getattr(orig.opener, 'options', None)
394 > vfs.options = getattr(orig.opener, 'options', None)
395 use getvfs()/getsvfs() for early Mercurial
395 use getvfs()/getsvfs() for early Mercurial
396 [1]
396 [1]
General Comments 0
You need to be logged in to leave comments. Login now