##// END OF EJS Templates
perf: document `perfdirstatewrite`
marmoute -
r43399:97f9ef77 default
parent child Browse files
Show More
@@ -1,3774 +1,3776 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged.

    Used as a no-op conversion fallback when pycompat helpers (e.g.
    byteskwargs/fsencode) are unavailable in old Mercurial versions.
    """
    return a
125
125
126
126
# Bind pycompat helpers when available; otherwise fall back to py2-only
# stand-ins so the extension still loads on historical Mercurial.
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
170
170
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel: distinguishes "missing" from attr == None


def safehasattr(thing, attr):
    """Return True when *thing* has attribute *attr* (given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
182
182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): bytes vs str comparison; on py3 os.name is str so this
    # branch never matches there — preserved for historical behavior.
    util.timer = time.clock
else:
    util.timer = time.time
192
192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table filled in by the @command decorator below
cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b"name|alias1|alias2" into a name list.

    Defined locally because cmdutil.parsealiases has only been
    available since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
228
228
229
229
# Pick the best available @command decorator for this Mercurial version.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Register the experimental perf.* config knobs when registrar/configitems
# exist; silently skip on Mercurial versions that predate them.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2 (configitem() no longer accepts experimental=)
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return the length function benchmarks should use.

    Under the experimental perf.stub config every collection is reported
    as length 1 (keeps test runs cheap); otherwise the builtin len.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
341
341
342
342
class noop(object):
    """dummy context manager"""

    def __enter__(self):
        # deliberately yields nothing
        pass

    def __exit__(self, *args):
        # never suppresses exceptions (implicitly returns None)
        pass


# shared do-nothing context, used in place of a real profiler
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # entries look like b'<seconds>-<runcount>'; malformed ones are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, with no timing or reporting.

    Drop-in replacement for _timer when perf.stub is set; *fm* and
    *title* are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Context manager yielding a list that receives one timing tuple.

    On exit a (wall-clock, user-cpu, system-cpu) delta tuple for the
    managed block is appended to the yielded list.
    """
    measured = []
    ostart = os.times()
    cstart = util.timer()
    yield measured
    cstop = util.timer()
    ostop = os.times()
    measured.append(
        (cstop - cstart, ostop[0] - ostart[0], ostop[1] - ostart[1])
    )
497
497
498
498
# list of stop condition (elapsed time, minimal run count): _timer keeps
# iterating until one pair is satisfied — 100 runs within 3s, or 3 runs
# within 10s, whichever comes first.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func*, timing each run, and report via *fm*.

    Timed runs continue until one of *limits* — (elapsed-seconds,
    min-run-count) pairs — is satisfied.  *setup* (if given) runs before
    every call; *prerun* extra untimed warm-up runs happen first.  Only
    the first timed iteration is profiled when *profiler* is provided.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up iterations: executed but never measured
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  Only the best run is shown unless *displayall* is true, in
    which case max, avg and median rows are emitted as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # the historical 'best' row uses un-prefixed field names
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read config *section*.*name* as an int, or *default* if unset.

    Defined locally for "historical portability": ui.configint has only
    been available since 1.9 (or fa2b596db182).  Raises
    error.ConfigError when the value is set but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    # remember the current value so restore() can put it back later
    saved = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the `subsettable` mapping across Mercurial versions.

    For "historical portability", subsettable has been defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    For "historical portability": repo.svfs has only been available
    since 2.3 (or 7034365089bf); older versions expose `sopener`.
    """
    store_vfs = getattr(repo, 'svfs', None)
    if not store_vfs:
        return getattr(repo, 'sopener')
    return store_vfs
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg

    For "historical portability": repo.vfs has only been available
    since 2.3 (or 7034365089bf); older versions expose `opener`.
    """
    repo_vfs = getattr(repo, 'vfs', None)
    if not repo_vfs:
        return getattr(repo, 'opener')
    return repo_vfs
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Probes the cache attributes Mercurial has used over time and returns
    the invalidation callable matching this repo's vintage.
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) would be the wrong way to
        # clear this cache: existing code paths expect _tagscache to be
        # a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop `attrname` from `obj`'s filecache so the next access reloads it.

    When `obj` is a filtered repoview, the cache lives on the unfiltered
    repo, so we clear it there.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # remove the bookkeeping entry too; missing keys are fine
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Invalidate the cached changelog so the next access re-reads it."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # filtered repoview: also reset its changelog cache keys
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walked))

    timer(d)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working copy parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def d():
        return len(fctx.annotate(True))

    timer(d)
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    Only the status of tracked files is requested by default; pass
    `--unknown` to also look for unknown files.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']

    def d():
        # sum the lengths of the per-state file lists returned by status
        return sum(map(len, repo.status(unknown=unknown)))

    timer(d)
    fm.end()
781
781
782
782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run `addremove` over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Capture the previous quiet level *before* entering the try block:
    # the original assigned it as the first try-statement, so any failure
    # occurring before the assignment would have turned the `finally`
    # restore into a NameError masking the real exception.
    oldquiet = repo.ui.quiet
    repo.ui.quiet = True
    try:
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # hg >= 5.0 passes a path-display callback to addremove
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800
800
801
801
def clearcaches(cl):
    """Clear revlog caches, coping with internal API changes over time."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs without clearcaches(): reset the node cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811
811
812
812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start every run from cold revlog caches
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=setup)
    fm.end()
828
828
829
829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # also start from cold changelog/manifest caches
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def d():
        return len(repo.tags())

    timer(d, setup=setup)
    fm.end()
852
852
853
853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the ancestor iterator; the iteration itself is the work
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
866
866
867
867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test is the benchmarked operation
            rev in ancestors

    timer(d)
    fm.end()
882
882
883
883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Every other perf command converts the str-keyed **opts to bytes
    # keys before handing them to gettimer(); this one was missing the
    # conversion, so formatter options were silently ignored on py3.
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # reconnect to the peer before each run so discovery starts fresh
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
900
900
901
901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses the file
        clearfilecache(repo, b'_bookmarks')

    def d():
        # attribute access triggers the (re)parse being measured
        repo._bookmarks

    timer(d, setup=setup)
    fm.end()
924
924
925
925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # open + parse the bundle, then hand it to the benchmarked callable
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the parsed bundle in `size`-byte chunks until exhausted
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads in `size`-byte chunks, no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # read each bundle2 part in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-I/O baselines first, then format-specific benchmarks below
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1050
1050
1051
1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the generator; producing the chunks is the work measured
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1087
1087
1088
1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark `dirstate.hasdir` starting from a cold `_dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds  # prime the dirstate itself before timing

    def d():
        ds.hasdir(b'a')
        # drop the directories cache so the next run rebuilds it
        del ds._map._dirs

    timer(d)
    fm.end()
1102
1102
1103
1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark the time necessary to load a dirstate from scratch

    The dirstate is loaded to the point where a "contains" request can
    be answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm everything once before timing

    def setup():
        # discard the in-memory dirstate so each run reloads it
        repo.dirstate.invalidate()

    def d():
        b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1123
1123
1124
1124
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")  # warm everything but the `_dirs` cache

    def setup():
        # discard the directories cache; the timed call must rebuild it
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1141
1141
1142
1142
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dsmap = repo.dirstate._map
    dsmap.filefoldmap.get(b'a')  # prime everything but the fold map

    def setup():
        # drop the cached fold map; the timed call rebuilds it
        del dsmap.filefoldmap

    def d():
        dsmap.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1162
1162
1163
1163
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dsmap = repo.dirstate._map
    dsmap.dirfoldmap.get(b'a')  # prime the caches once before timing

    def setup():
        # drop both the fold map and the underlying directories cache
        del dsmap.dirfoldmap
        del dsmap._dirs

    def d():
        dsmap.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1184
1184
1185
1185
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # force the dirstate to be loaded before timing

    def d():
        # flag the dirstate dirty by hand so write() really hits the disk
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(d)
    fm.end()
1199
1201
1200
1202
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1222
1224
1223
1225
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the update-calculation step of a merge"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def runone():
        # acceptremote=True keeps the benchmark from stalling on prompts
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(runone)
    fm.end()
1255
1257
1256
1258
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def runone():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(runone)
    fm.end()
1279
1281
1280
1282
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def runone():
        copies.pathcopies(ctx1, ctx2)

    timer(runone)
    fm.end()
1294
1296
1295
1297
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def runone():
        phases = _phases
        if full:
            # with --full, also measure re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(runone)
    fm.end()
1318
1320
1319
1321
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # use items() instead of iteritems(): dict.iteritems() does not exist on
    # Python 3 and would make this command crash there
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1376
1378
1377
1379
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # treat `rev` as a changeset and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # fall back for older Mercurial versions
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1421
1423
1422
1424
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def runone():
        repo.changelog.read(node)

    timer(runone)
    fm.end()
1435
1437
1436
1438
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # make sure the ignore matcher is rebuilt from scratch on every run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1453
1455
1454
1456
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs converted all option keys to bytes, so a bytes key
        # must be used here (opts['rev'] would KeyError on Python 3)
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1516
1518
1517
1519
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs converted all option keys to bytes, so a bytes key must be
    # used here (opts['clear_caches'] would KeyError on Python 3)
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1584
1586
1585
1587
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of the `hg` executable itself"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        # run `hg version` with an empty HGRCPATH so the user configuration
        # does not influence the measurement
        if os.name != r'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(runone)
    fm.end()
1602
1604
1603
1605
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve all nodes up front so only parents() is timed
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def runone():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(runone)
    fm.end()
1629
1631
1630
1632
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changeset"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def runone():
        len(repo[x].files())

    timer(runone)
    fm.end()
1642
1644
1643
1645
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files field of a changelog entry"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def runone():
        # index 3 of the parsed changelog entry holds the files list
        len(cl.read(x)[3])

    timer(runone)
    fm.end()
1656
1658
1657
1659
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runone():
        len(repo.lookup(rev))

    timer(runone)
    fm.end()
1664
1666
1665
1667
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the same pseudo-random edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def runone():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(runone)
    fm.end()
1703
1705
1704
1706
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange

    def runone():
        len(revrange(repo, specs))

    timer(runone)
    fm.end()
1712
1714
1713
1715
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node-to-revision lookup on a cold revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def runone():
        cl.rev(node)
        # drop the caches so the next iteration starts cold again
        clearcaches(cl)

    timer(runone)
    fm.end()
1730
1732
1731
1733
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` run, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the command output so printing does not skew the measurement
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1749
1751
1750
1752
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1767
1769
1768
1770
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # discard rendered output so I/O does not dominate the measurement;
    # the devnull handle is closed below instead of being leaked
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    try:
        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # release the os.devnull handle opened above
        nullui.fout.close()
1808
1810
1809
1811
def _displaystats(ui, opts, entries, data):
    """display percentile statistics about the measured data

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each key
    to a list of tuples whose first element is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # no data point gathered for this entry; skip it instead of
            # crashing with an IndexError on the percentile lookups below
            continue
        # fix: percentile indices must be based on the number of data
        # points, not on len(data) (the number of entries), which pinned
        # every percentile near the minimum
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1855
1857
1856
1858
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command finds (base, p1, p2) triplets relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the timing/rename columns when they will not be filled in
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions are relevant here
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # fix: `end` must be sampled *before* computing p2.time;
                # the previous ordering reused the stale `end` from the p1
                # measurement, making p2.time meaningless
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2038
2040
2039
2041
2040 @command(
2042 @command(
2041 b'perfhelper-pathcopies',
2043 b'perfhelper-pathcopies',
2042 formatteropts
2044 formatteropts
2043 + [
2045 + [
2044 (b'r', b'revs', [], b'restrict search to these revisions'),
2046 (b'r', b'revs', [], b'restrict search to these revisions'),
2045 (b'', b'timing', False, b'provides extra data (costly)'),
2047 (b'', b'timing', False, b'provides extra data (costly)'),
2046 (b'', b'stats', False, b'provides statistic about the measured data'),
2048 (b'', b'stats', False, b'provides statistic about the measured data'),
2047 ],
2049 ],
2048 )
2050 )
2049 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2051 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2050 """find statistic about potential parameters for the `perftracecopies`
2052 """find statistic about potential parameters for the `perftracecopies`
2051
2053
2052 This command find source-destination pair relevant for copytracing testing.
2054 This command find source-destination pair relevant for copytracing testing.
2053 It report value for some of the parameters that impact copy tracing time.
2055 It report value for some of the parameters that impact copy tracing time.
2054
2056
2055 If `--timing` is set, rename detection is run and the associated timing
2057 If `--timing` is set, rename detection is run and the associated timing
2056 will be reported. The extra details comes at the cost of a slower command
2058 will be reported. The extra details comes at the cost of a slower command
2057 execution.
2059 execution.
2058
2060
2059 Since the rename detection is only run once, other factors might easily
2061 Since the rename detection is only run once, other factors might easily
2060 affect the precision of the timing. However it should give a good
2062 affect the precision of the timing. However it should give a good
2061 approximation of which revision pairs are very costly.
2063 approximation of which revision pairs are very costly.
2062 """
2064 """
2063 opts = _byteskwargs(opts)
2065 opts = _byteskwargs(opts)
2064 fm = ui.formatter(b'perf', opts)
2066 fm = ui.formatter(b'perf', opts)
2065 dotiming = opts[b'timing']
2067 dotiming = opts[b'timing']
2066 dostats = opts[b'stats']
2068 dostats = opts[b'stats']
2067
2069
2068 if dotiming:
2070 if dotiming:
2069 header = '%12s %12s %12s %12s %12s %12s\n'
2071 header = '%12s %12s %12s %12s %12s %12s\n'
2070 output = (
2072 output = (
2071 "%(source)12s %(destination)12s "
2073 "%(source)12s %(destination)12s "
2072 "%(nbrevs)12d %(nbmissingfiles)12d "
2074 "%(nbrevs)12d %(nbmissingfiles)12d "
2073 "%(nbrenamedfiles)12d %(time)18.5f\n"
2075 "%(nbrenamedfiles)12d %(time)18.5f\n"
2074 )
2076 )
2075 header_names = (
2077 header_names = (
2076 "source",
2078 "source",
2077 "destination",
2079 "destination",
2078 "nb-revs",
2080 "nb-revs",
2079 "nb-files",
2081 "nb-files",
2080 "nb-renames",
2082 "nb-renames",
2081 "time",
2083 "time",
2082 )
2084 )
2083 fm.plain(header % header_names)
2085 fm.plain(header % header_names)
2084 else:
2086 else:
2085 header = '%12s %12s %12s %12s\n'
2087 header = '%12s %12s %12s %12s\n'
2086 output = (
2088 output = (
2087 "%(source)12s %(destination)12s "
2089 "%(source)12s %(destination)12s "
2088 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2090 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2089 )
2091 )
2090 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2092 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2091
2093
2092 if not revs:
2094 if not revs:
2093 revs = ['all()']
2095 revs = ['all()']
2094 revs = scmutil.revrange(repo, revs)
2096 revs = scmutil.revrange(repo, revs)
2095
2097
2096 if dostats:
2098 if dostats:
2097 alldata = {
2099 alldata = {
2098 'nbrevs': [],
2100 'nbrevs': [],
2099 'nbmissingfiles': [],
2101 'nbmissingfiles': [],
2100 }
2102 }
2101 if dotiming:
2103 if dotiming:
2102 alldata['nbrenames'] = []
2104 alldata['nbrenames'] = []
2103 alldata['time'] = []
2105 alldata['time'] = []
2104
2106
2105 roi = repo.revs('merge() and %ld', revs)
2107 roi = repo.revs('merge() and %ld', revs)
2106 for r in roi:
2108 for r in roi:
2107 ctx = repo[r]
2109 ctx = repo[r]
2108 p1 = ctx.p1().rev()
2110 p1 = ctx.p1().rev()
2109 p2 = ctx.p2().rev()
2111 p2 = ctx.p2().rev()
2110 bases = repo.changelog._commonancestorsheads(p1, p2)
2112 bases = repo.changelog._commonancestorsheads(p1, p2)
2111 for p in (p1, p2):
2113 for p in (p1, p2):
2112 for b in bases:
2114 for b in bases:
2113 base = repo[b]
2115 base = repo[b]
2114 parent = repo[p]
2116 parent = repo[p]
2115 missing = copies._computeforwardmissing(base, parent)
2117 missing = copies._computeforwardmissing(base, parent)
2116 if not missing:
2118 if not missing:
2117 continue
2119 continue
2118 data = {
2120 data = {
2119 b'source': base.hex(),
2121 b'source': base.hex(),
2120 b'destination': parent.hex(),
2122 b'destination': parent.hex(),
2121 b'nbrevs': len(repo.revs('%d::%d', b, p)),
2123 b'nbrevs': len(repo.revs('%d::%d', b, p)),
2122 b'nbmissingfiles': len(missing),
2124 b'nbmissingfiles': len(missing),
2123 }
2125 }
2124 if dostats:
2126 if dostats:
2125 alldata['nbrevs'].append(
2127 alldata['nbrevs'].append(
2126 (data['nbrevs'], base.hex(), parent.hex(),)
2128 (data['nbrevs'], base.hex(), parent.hex(),)
2127 )
2129 )
2128 alldata['nbmissingfiles'].append(
2130 alldata['nbmissingfiles'].append(
2129 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2131 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2130 )
2132 )
2131 if dotiming:
2133 if dotiming:
2132 begin = util.timer()
2134 begin = util.timer()
2133 renames = copies.pathcopies(base, parent)
2135 renames = copies.pathcopies(base, parent)
2134 end = util.timer()
2136 end = util.timer()
2135 # not very stable timing since we did only one run
2137 # not very stable timing since we did only one run
2136 data['time'] = end - begin
2138 data['time'] = end - begin
2137 data['nbrenamedfiles'] = len(renames)
2139 data['nbrenamedfiles'] = len(renames)
2138 if dostats:
2140 if dostats:
2139 alldata['time'].append(
2141 alldata['time'].append(
2140 (data['time'], base.hex(), parent.hex(),)
2142 (data['time'], base.hex(), parent.hex(),)
2141 )
2143 )
2142 alldata['nbrenames'].append(
2144 alldata['nbrenames'].append(
2143 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2145 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2144 )
2146 )
2145 fm.startitem()
2147 fm.startitem()
2146 fm.data(**data)
2148 fm.data(**data)
2147 out = data.copy()
2149 out = data.copy()
2148 out['source'] = fm.hexfunc(base.node())
2150 out['source'] = fm.hexfunc(base.node())
2149 out['destination'] = fm.hexfunc(parent.node())
2151 out['destination'] = fm.hexfunc(parent.node())
2150 fm.plain(output % out)
2152 fm.plain(output % out)
2151
2153
2152 fm.end()
2154 fm.end()
2153 if dostats:
2155 if dostats:
2154 # use a second formatter because the data are quite different, not sure
2156 # use a second formatter because the data are quite different, not sure
2155 # how it flies with the templater.
2157 # how it flies with the templater.
2156 fm = ui.formatter(b'perf', opts)
2158 fm = ui.formatter(b'perf', opts)
2157 entries = [
2159 entries = [
2158 ('nbrevs', 'number of revision covered'),
2160 ('nbrevs', 'number of revision covered'),
2159 ('nbmissingfiles', 'number of missing files at head'),
2161 ('nbmissingfiles', 'number of missing files at head'),
2160 ]
2162 ]
2161 if dotiming:
2163 if dotiming:
2162 entries.append(('nbrenames', 'renamed files'))
2164 entries.append(('nbrenames', 'renamed files'))
2163 entries.append(('time', 'time'))
2165 entries.append(('time', 'time'))
2164 _displaystats(ui, opts, entries, alldata)
2166 _displaystats(ui, opts, entries, alldata)
2165
2167
2166
2168
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark the construction of a case collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2173
2175
2174
2176
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def d():
        store.fncache._load()

    timer(d)
    fm.end()
2186
2188
2187
2189
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache to disk within an open transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                # mark the cache dirty so every run actually rewrites it
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            # fix: the transaction used to leak on error; release() is a
            # no-op after close() and aborts the transaction otherwise
            tr.release()
    finally:
        # fix: the repo lock used to leak if the benchmark raised
        lock.release()
    fm.end()
2206
2208
2207
2209
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark computing the store encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def d():
        for path in store.fncache.entries:
            store.encode(path)

    timer(d)
    fm.end()
2221
2223
2222
2224
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop: pull text pairs from ``q`` and diff them

    A ``None`` item marks the end of a batch; after draining a batch the
    worker waits on the ``ready`` condition until woken for the next one,
    and exits once ``done`` is set.
    """
    # the flags never change during a run, so pick the diff function once
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            diff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2238
2240
2239
2241
def _manifestrevision(repo, mnode):
    """return the raw revision text for manifest node ``mnode``"""
    ml = repo.manifestlog
    # modern manifestlog objects expose getstorage(); older versions only
    # provide the private ``_revlog`` attribute
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2249
2251
2250
2252
2251 @command(
2253 @command(
2252 b'perfbdiff',
2254 b'perfbdiff',
2253 revlogopts
2255 revlogopts
2254 + formatteropts
2256 + formatteropts
2255 + [
2257 + [
2256 (
2258 (
2257 b'',
2259 b'',
2258 b'count',
2260 b'count',
2259 1,
2261 1,
2260 b'number of revisions to test (when using --startrev)',
2262 b'number of revisions to test (when using --startrev)',
2261 ),
2263 ),
2262 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2264 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2263 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2265 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2264 (b'', b'blocks', False, b'test computing diffs into blocks'),
2266 (b'', b'blocks', False, b'test computing diffs into blocks'),
2265 (b'', b'xdiff', False, b'use xdiff algorithm'),
2267 (b'', b'xdiff', False, b'use xdiff algorithm'),
2266 ],
2268 ],
2267 b'-c|-m|FILE REV',
2269 b'-c|-m|FILE REV',
2268 )
2270 )
2269 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2271 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2270 """benchmark a bdiff between revisions
2272 """benchmark a bdiff between revisions
2271
2273
2272 By default, benchmark a bdiff between its delta parent and itself.
2274 By default, benchmark a bdiff between its delta parent and itself.
2273
2275
2274 With ``--count``, benchmark bdiffs between delta parents and self for N
2276 With ``--count``, benchmark bdiffs between delta parents and self for N
2275 revisions starting at the specified revision.
2277 revisions starting at the specified revision.
2276
2278
2277 With ``--alldata``, assume the requested revision is a changeset and
2279 With ``--alldata``, assume the requested revision is a changeset and
2278 measure bdiffs for all changes related to that changeset (manifest
2280 measure bdiffs for all changes related to that changeset (manifest
2279 and filelogs).
2281 and filelogs).
2280 """
2282 """
2281 opts = _byteskwargs(opts)
2283 opts = _byteskwargs(opts)
2282
2284
2283 if opts[b'xdiff'] and not opts[b'blocks']:
2285 if opts[b'xdiff'] and not opts[b'blocks']:
2284 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2286 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2285
2287
2286 if opts[b'alldata']:
2288 if opts[b'alldata']:
2287 opts[b'changelog'] = True
2289 opts[b'changelog'] = True
2288
2290
2289 if opts.get(b'changelog') or opts.get(b'manifest'):
2291 if opts.get(b'changelog') or opts.get(b'manifest'):
2290 file_, rev = None, file_
2292 file_, rev = None, file_
2291 elif rev is None:
2293 elif rev is None:
2292 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2294 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2293
2295
2294 blocks = opts[b'blocks']
2296 blocks = opts[b'blocks']
2295 xdiff = opts[b'xdiff']
2297 xdiff = opts[b'xdiff']
2296 textpairs = []
2298 textpairs = []
2297
2299
2298 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2300 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2299
2301
2300 startrev = r.rev(r.lookup(rev))
2302 startrev = r.rev(r.lookup(rev))
2301 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2303 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2302 if opts[b'alldata']:
2304 if opts[b'alldata']:
2303 # Load revisions associated with changeset.
2305 # Load revisions associated with changeset.
2304 ctx = repo[rev]
2306 ctx = repo[rev]
2305 mtext = _manifestrevision(repo, ctx.manifestnode())
2307 mtext = _manifestrevision(repo, ctx.manifestnode())
2306 for pctx in ctx.parents():
2308 for pctx in ctx.parents():
2307 pman = _manifestrevision(repo, pctx.manifestnode())
2309 pman = _manifestrevision(repo, pctx.manifestnode())
2308 textpairs.append((pman, mtext))
2310 textpairs.append((pman, mtext))
2309
2311
2310 # Load filelog revisions by iterating manifest delta.
2312 # Load filelog revisions by iterating manifest delta.
2311 man = ctx.manifest()
2313 man = ctx.manifest()
2312 pman = ctx.p1().manifest()
2314 pman = ctx.p1().manifest()
2313 for filename, change in pman.diff(man).items():
2315 for filename, change in pman.diff(man).items():
2314 fctx = repo.file(filename)
2316 fctx = repo.file(filename)
2315 f1 = fctx.revision(change[0][0] or -1)
2317 f1 = fctx.revision(change[0][0] or -1)
2316 f2 = fctx.revision(change[1][0] or -1)
2318 f2 = fctx.revision(change[1][0] or -1)
2317 textpairs.append((f1, f2))
2319 textpairs.append((f1, f2))
2318 else:
2320 else:
2319 dp = r.deltaparent(rev)
2321 dp = r.deltaparent(rev)
2320 textpairs.append((r.revision(dp), r.revision(rev)))
2322 textpairs.append((r.revision(dp), r.revision(rev)))
2321
2323
2322 withthreads = threads > 0
2324 withthreads = threads > 0
2323 if not withthreads:
2325 if not withthreads:
2324
2326
2325 def d():
2327 def d():
2326 for pair in textpairs:
2328 for pair in textpairs:
2327 if xdiff:
2329 if xdiff:
2328 mdiff.bdiff.xdiffblocks(*pair)
2330 mdiff.bdiff.xdiffblocks(*pair)
2329 elif blocks:
2331 elif blocks:
2330 mdiff.bdiff.blocks(*pair)
2332 mdiff.bdiff.blocks(*pair)
2331 else:
2333 else:
2332 mdiff.textdiff(*pair)
2334 mdiff.textdiff(*pair)
2333
2335
2334 else:
2336 else:
2335 q = queue()
2337 q = queue()
2336 for i in _xrange(threads):
2338 for i in _xrange(threads):
2337 q.put(None)
2339 q.put(None)
2338 ready = threading.Condition()
2340 ready = threading.Condition()
2339 done = threading.Event()
2341 done = threading.Event()
2340 for i in _xrange(threads):
2342 for i in _xrange(threads):
2341 threading.Thread(
2343 threading.Thread(
2342 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2344 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2343 ).start()
2345 ).start()
2344 q.join()
2346 q.join()
2345
2347
2346 def d():
2348 def d():
2347 for pair in textpairs:
2349 for pair in textpairs:
2348 q.put(pair)
2350 q.put(pair)
2349 for i in _xrange(threads):
2351 for i in _xrange(threads):
2350 q.put(None)
2352 q.put(None)
2351 with ready:
2353 with ready:
2352 ready.notify_all()
2354 ready.notify_all()
2353 q.join()
2355 q.join()
2354
2356
2355 timer, fm = gettimer(ui, opts)
2357 timer, fm = gettimer(ui, opts)
2356 timer(d)
2358 timer(d)
2357 fm.end()
2359 fm.end()
2358
2360
2359 if withthreads:
2361 if withthreads:
2360 done.set()
2362 done.set()
2361 for i in _xrange(threads):
2363 for i in _xrange(threads):
2362 q.put(None)
2364 q.put(None)
2363 with ready:
2365 with ready:
2364 ready.notify_all()
2366 ready.notify_all()
2365
2367
2366
2368
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # Gather all the (left, right) text pairs up front so only the diffing
    # itself is timed below.
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    endrev = min(startrev + count, len(r) - 1)
    for rev in range(startrev, endrev):
        if not opts[b'alldata']:
            # Diff each revision against its delta parent.
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))
            continue
        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            textpairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            f1 = fctx.revision(change[0][0] or -1)
            f2 = fctx.revision(change[1][0] or -1)
            textpairs.append((f1, f2))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2445
2447
2446
2448
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter flag -> commands.diff() keyword argument
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Benchmark the plain diff and each whitespace-option combination.
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        kwargs = {options[c]: b'1' for c in diffopt}

        def d(kwargs=kwargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        flag = diffopt.encode('ascii')
        if flag:
            title = b'diffopts: %s' % (b'-' + flag)
        else:
            title = b'diffopts: %s' % b'none'
        timer(d, title=title)
    fm.end()
2470
2472
2471
2473
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort(b'unsupported revlog version: %d' % version)
    revlogio = revlog.revlogio()
    inline = header & (1 << 16)

    rllen = len(rl)

    # sample nodes at 0%, 25%, 50%, 75% and 100% of the index
    sampleidx = [0, rllen // 4, rllen // 2, rllen // 4 * 3, rllen - 1]
    node0, node25, node50, node75, node100 = [rl.node(r) for r in sampleidx]

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in allrevs]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for _pass in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for _pass in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2592
2594
2593
2595
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the tip
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last = rllen - 1, startrev - 1
            step = -1 * step
        else:
            first, last = startrev, rllen

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2642
2644
2643
2645
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing from pass 1, timing from pass 2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the 50th percentile index was computed as `* 70 // 100`,
        # which actually reported the 70th percentile
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2785
2787
2786
2788
2787 class _faketr(object):
2789 class _faketr(object):
2788 def add(s, x, y, z=None):
2790 def add(s, x, y, z=None):
2789 return None
2791 return None
2790
2792
2791
2793
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of ``orig`` into a throwaway revlog, timing each add.

    Returns a list of (rev, timing) pairs, one per replayed revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            updateprogress = progress.update

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as elapsed:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, elapsed[0]))
        updateprogress(total)
        completeprogress()
    return timings
2841
2843
2842
2844
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for addrawrevision() replaying ``rev``.

    ``source`` selects how the revision data is provided: as a full text,
    as a delta against one of the parents, or as the stored delta.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # prefer p2 only when its delta is strictly smaller
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2883
2885
2884
2886
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig` truncated at `truncaterev`.

    The index and data files are copied into a fresh temporary directory,
    then truncated so that revisions >= `truncaterev` are missing and can
    be re-added by the caller (e.g. to benchmark write paths).  The
    temporary directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    # inline revlogs interleave index and data in one file; the simple
    # truncate-by-offset logic below does not handle that layout
    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward newer constructor arguments only when the running version
    # of the revlog class knows about them
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # the index is a fixed-size-record file, so the byte offset of
        # revision N is simply N * record-size
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        # propagate opener options (compression settings, etc.) so the
        # copy behaves like the original
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2935
2937
2936
2938
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit list: probe every registered engine and keep the
        # ones that are usable for revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open the file object backing this revlog's chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    # one-revision-at-a-time read; a fresh file handle per call
    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    # same as doread, but reusing a single file descriptor
    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    # read the whole rev span in one segment request
    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    # read + decompress each chunk individually
    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            # chunks[0] was populated by the dochunkbatch benchmark above
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3064
3066
3065
3067
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m, the positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the loop; this function is timed
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # each do* closure below benchmarks one phase; when --cache is not
    # set, caches are cleared first so every run starts cold
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase benchmark needs
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3207
3209
3208
3210
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revisions set cache on the revset execution. The volatile cache
    holds filtered and obsolete related caches."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop hidden/obsolete caches so each run recomputes them
            repo.invalidatevolatilesets()
        if contexts:
            # iterate changectx objects (heavier than bare rev numbers)
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3240
3242
3241
3243
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        # build a closure recomputing one volatile set from a cold state
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)

        return run

    # obsolescence-related sets first, then repoview filters; when
    # positional names are given, restrict each list to those names
    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for setname in obsnames:
        timer(makebench(obsolete.getrevs, setname), title=setname)

    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for setname in filternames:
        timer(makebench(repoview.filterrevs, setname), title=setname)
    fm.end()
3287
3289
3288
3290
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # wipe every cached branchmap so the subset build time
                # is included in the measurement
                view._branchcaches.clear()
            else:
                # only drop the entry for this filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # order filters so that each one's subset (if any) comes after it
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes so only the in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3378
3380
3379
3381
3380 @command(
3382 @command(
3381 b'perfbranchmapupdate',
3383 b'perfbranchmapupdate',
3382 [
3384 [
3383 (b'', b'base', [], b'subset of revision to start from'),
3385 (b'', b'base', [], b'subset of revision to start from'),
3384 (b'', b'target', [], b'subset of revision to end with'),
3386 (b'', b'target', [], b'subset of revision to end with'),
3385 (b'', b'clear-caches', False, b'clear cache between each runs'),
3387 (b'', b'clear-caches', False, b'clear cache between each runs'),
3386 ]
3388 ]
3387 + formatteropts,
3389 + formatteropts,
3388 )
3390 )
3389 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3391 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3390 """benchmark branchmap update from for <base> revs to <target> revs
3392 """benchmark branchmap update from for <base> revs to <target> revs
3391
3393
3392 If `--clear-caches` is passed, the following items will be reset before
3394 If `--clear-caches` is passed, the following items will be reset before
3393 each update:
3395 each update:
3394 * the changelog instance and associated indexes
3396 * the changelog instance and associated indexes
3395 * the rev-branch-cache instance
3397 * the rev-branch-cache instance
3396
3398
3397 Examples:
3399 Examples:
3398
3400
3399 # update for the one last revision
3401 # update for the one last revision
3400 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3402 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3401
3403
3402 $ update for change coming with a new branch
3404 $ update for change coming with a new branch
3403 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3405 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3404 """
3406 """
3405 from mercurial import branchmap
3407 from mercurial import branchmap
3406 from mercurial import repoview
3408 from mercurial import repoview
3407
3409
3408 opts = _byteskwargs(opts)
3410 opts = _byteskwargs(opts)
3409 timer, fm = gettimer(ui, opts)
3411 timer, fm = gettimer(ui, opts)
3410 clearcaches = opts[b'clear_caches']
3412 clearcaches = opts[b'clear_caches']
3411 unfi = repo.unfiltered()
3413 unfi = repo.unfiltered()
3412 x = [None] # used to pass data between closure
3414 x = [None] # used to pass data between closure
3413
3415
3414 # we use a `list` here to avoid possible side effect from smartset
3416 # we use a `list` here to avoid possible side effect from smartset
3415 baserevs = list(scmutil.revrange(repo, base))
3417 baserevs = list(scmutil.revrange(repo, base))
3416 targetrevs = list(scmutil.revrange(repo, target))
3418 targetrevs = list(scmutil.revrange(repo, target))
3417 if not baserevs:
3419 if not baserevs:
3418 raise error.Abort(b'no revisions selected for --base')
3420 raise error.Abort(b'no revisions selected for --base')
3419 if not targetrevs:
3421 if not targetrevs:
3420 raise error.Abort(b'no revisions selected for --target')
3422 raise error.Abort(b'no revisions selected for --target')
3421
3423
3422 # make sure the target branchmap also contains the one in the base
3424 # make sure the target branchmap also contains the one in the base
3423 targetrevs = list(set(baserevs) | set(targetrevs))
3425 targetrevs = list(set(baserevs) | set(targetrevs))
3424 targetrevs.sort()
3426 targetrevs.sort()
3425
3427
3426 cl = repo.changelog
3428 cl = repo.changelog
3427 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3429 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3428 allbaserevs.sort()
3430 allbaserevs.sort()
3429 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3431 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3430
3432
3431 newrevs = list(alltargetrevs.difference(allbaserevs))
3433 newrevs = list(alltargetrevs.difference(allbaserevs))
3432 newrevs.sort()
3434 newrevs.sort()
3433
3435
3434 allrevs = frozenset(unfi.changelog.revs())
3436 allrevs = frozenset(unfi.changelog.revs())
3435 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3437 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3436 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3438 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3437
3439
3438 def basefilter(repo, visibilityexceptions=None):
3440 def basefilter(repo, visibilityexceptions=None):
3439 return basefilterrevs
3441 return basefilterrevs
3440
3442
3441 def targetfilter(repo, visibilityexceptions=None):
3443 def targetfilter(repo, visibilityexceptions=None):
3442 return targetfilterrevs
3444 return targetfilterrevs
3443
3445
3444 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3446 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3445 ui.status(msg % (len(allbaserevs), len(newrevs)))
3447 ui.status(msg % (len(allbaserevs), len(newrevs)))
3446 if targetfilterrevs:
3448 if targetfilterrevs:
3447 msg = b'(%d revisions still filtered)\n'
3449 msg = b'(%d revisions still filtered)\n'
3448 ui.status(msg % len(targetfilterrevs))
3450 ui.status(msg % len(targetfilterrevs))
3449
3451
3450 try:
3452 try:
3451 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3453 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3452 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3454 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3453
3455
3454 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3456 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3455 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3457 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3456
3458
3457 # try to find an existing branchmap to reuse
3459 # try to find an existing branchmap to reuse
3458 subsettable = getbranchmapsubsettable()
3460 subsettable = getbranchmapsubsettable()
3459 candidatefilter = subsettable.get(None)
3461 candidatefilter = subsettable.get(None)
3460 while candidatefilter is not None:
3462 while candidatefilter is not None:
3461 candidatebm = repo.filtered(candidatefilter).branchmap()
3463 candidatebm = repo.filtered(candidatefilter).branchmap()
3462 if candidatebm.validfor(baserepo):
3464 if candidatebm.validfor(baserepo):
3463 filtered = repoview.filterrevs(repo, candidatefilter)
3465 filtered = repoview.filterrevs(repo, candidatefilter)
3464 missing = [r for r in allbaserevs if r in filtered]
3466 missing = [r for r in allbaserevs if r in filtered]
3465 base = candidatebm.copy()
3467 base = candidatebm.copy()
3466 base.update(baserepo, missing)
3468 base.update(baserepo, missing)
3467 break
3469 break
3468 candidatefilter = subsettable.get(candidatefilter)
3470 candidatefilter = subsettable.get(candidatefilter)
3469 else:
3471 else:
3470 # no suitable subset where found
3472 # no suitable subset where found
3471 base = branchmap.branchcache()
3473 base = branchmap.branchcache()
3472 base.update(baserepo, allbaserevs)
3474 base.update(baserepo, allbaserevs)
3473
3475
3474 def setup():
3476 def setup():
3475 x[0] = base.copy()
3477 x[0] = base.copy()
3476 if clearcaches:
3478 if clearcaches:
3477 unfi._revbranchcache = None
3479 unfi._revbranchcache = None
3478 clearchangelog(repo)
3480 clearchangelog(repo)
3479
3481
3480 def bench():
3482 def bench():
3481 x[0].update(targetrepo, newrevs)
3483 x[0].update(targetrepo, newrevs)
3482
3484
3483 timer(bench, setup=setup)
3485 timer(bench, setup=setup)
3484 fm.end()
3486 fm.end()
3485 finally:
3487 finally:
3486 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3488 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3487 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3489 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3488
3490
3489
3491
3490 @command(
3492 @command(
3491 b'perfbranchmapload',
3493 b'perfbranchmapload',
3492 [
3494 [
3493 (b'f', b'filter', b'', b'Specify repoview filter'),
3495 (b'f', b'filter', b'', b'Specify repoview filter'),
3494 (b'', b'list', False, b'List brachmap filter caches'),
3496 (b'', b'list', False, b'List brachmap filter caches'),
3495 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3497 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3496 ]
3498 ]
3497 + formatteropts,
3499 + formatteropts,
3498 )
3500 )
3499 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3501 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3500 """benchmark reading the branchmap"""
3502 """benchmark reading the branchmap"""
3501 opts = _byteskwargs(opts)
3503 opts = _byteskwargs(opts)
3502 clearrevlogs = opts[b'clear_revlogs']
3504 clearrevlogs = opts[b'clear_revlogs']
3503
3505
3504 if list:
3506 if list:
3505 for name, kind, st in repo.cachevfs.readdir(stat=True):
3507 for name, kind, st in repo.cachevfs.readdir(stat=True):
3506 if name.startswith(b'branch2'):
3508 if name.startswith(b'branch2'):
3507 filtername = name.partition(b'-')[2] or b'unfiltered'
3509 filtername = name.partition(b'-')[2] or b'unfiltered'
3508 ui.status(
3510 ui.status(
3509 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3511 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3510 )
3512 )
3511 return
3513 return
3512 if not filter:
3514 if not filter:
3513 filter = None
3515 filter = None
3514 subsettable = getbranchmapsubsettable()
3516 subsettable = getbranchmapsubsettable()
3515 if filter is None:
3517 if filter is None:
3516 repo = repo.unfiltered()
3518 repo = repo.unfiltered()
3517 else:
3519 else:
3518 repo = repoview.repoview(repo, filter)
3520 repo = repoview.repoview(repo, filter)
3519
3521
3520 repo.branchmap() # make sure we have a relevant, up to date branchmap
3522 repo.branchmap() # make sure we have a relevant, up to date branchmap
3521
3523
3522 try:
3524 try:
3523 fromfile = branchmap.branchcache.fromfile
3525 fromfile = branchmap.branchcache.fromfile
3524 except AttributeError:
3526 except AttributeError:
3525 # older versions
3527 # older versions
3526 fromfile = branchmap.read
3528 fromfile = branchmap.read
3527
3529
3528 currentfilter = filter
3530 currentfilter = filter
3529 # try once without timer, the filter may not be cached
3531 # try once without timer, the filter may not be cached
3530 while fromfile(repo) is None:
3532 while fromfile(repo) is None:
3531 currentfilter = subsettable.get(currentfilter)
3533 currentfilter = subsettable.get(currentfilter)
3532 if currentfilter is None:
3534 if currentfilter is None:
3533 raise error.Abort(
3535 raise error.Abort(
3534 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3536 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3535 )
3537 )
3536 repo = repo.filtered(currentfilter)
3538 repo = repo.filtered(currentfilter)
3537 timer, fm = gettimer(ui, opts)
3539 timer, fm = gettimer(ui, opts)
3538
3540
3539 def setup():
3541 def setup():
3540 if clearrevlogs:
3542 if clearrevlogs:
3541 clearchangelog(repo)
3543 clearchangelog(repo)
3542
3544
3543 def bench():
3545 def bench():
3544 fromfile(repo)
3546 fromfile(repo)
3545
3547
3546 timer(bench, setup=setup)
3548 timer(bench, setup=setup)
3547 fm.end()
3549 fm.end()
3548
3550
3549
3551
3550 @command(b'perfloadmarkers')
3552 @command(b'perfloadmarkers')
3551 def perfloadmarkers(ui, repo):
3553 def perfloadmarkers(ui, repo):
3552 """benchmark the time to parse the on-disk markers for a repo
3554 """benchmark the time to parse the on-disk markers for a repo
3553
3555
3554 Result is the number of markers in the repo."""
3556 Result is the number of markers in the repo."""
3555 timer, fm = gettimer(ui)
3557 timer, fm = gettimer(ui)
3556 svfs = getsvfs(repo)
3558 svfs = getsvfs(repo)
3557 timer(lambda: len(obsolete.obsstore(svfs)))
3559 timer(lambda: len(obsolete.obsstore(svfs)))
3558 fm.end()
3560 fm.end()
3559
3561
3560
3562
3561 @command(
3563 @command(
3562 b'perflrucachedict',
3564 b'perflrucachedict',
3563 formatteropts
3565 formatteropts
3564 + [
3566 + [
3565 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3567 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3566 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3568 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3567 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3569 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3568 (b'', b'size', 4, b'size of cache'),
3570 (b'', b'size', 4, b'size of cache'),
3569 (b'', b'gets', 10000, b'number of key lookups'),
3571 (b'', b'gets', 10000, b'number of key lookups'),
3570 (b'', b'sets', 10000, b'number of key sets'),
3572 (b'', b'sets', 10000, b'number of key sets'),
3571 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3573 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3572 (
3574 (
3573 b'',
3575 b'',
3574 b'mixedgetfreq',
3576 b'mixedgetfreq',
3575 50,
3577 50,
3576 b'frequency of get vs set ops in mixed mode',
3578 b'frequency of get vs set ops in mixed mode',
3577 ),
3579 ),
3578 ],
3580 ],
3579 norepo=True,
3581 norepo=True,
3580 )
3582 )
3581 def perflrucache(
3583 def perflrucache(
3582 ui,
3584 ui,
3583 mincost=0,
3585 mincost=0,
3584 maxcost=100,
3586 maxcost=100,
3585 costlimit=0,
3587 costlimit=0,
3586 size=4,
3588 size=4,
3587 gets=10000,
3589 gets=10000,
3588 sets=10000,
3590 sets=10000,
3589 mixed=10000,
3591 mixed=10000,
3590 mixedgetfreq=50,
3592 mixedgetfreq=50,
3591 **opts
3593 **opts
3592 ):
3594 ):
3593 opts = _byteskwargs(opts)
3595 opts = _byteskwargs(opts)
3594
3596
3595 def doinit():
3597 def doinit():
3596 for i in _xrange(10000):
3598 for i in _xrange(10000):
3597 util.lrucachedict(size)
3599 util.lrucachedict(size)
3598
3600
3599 costrange = list(range(mincost, maxcost + 1))
3601 costrange = list(range(mincost, maxcost + 1))
3600
3602
3601 values = []
3603 values = []
3602 for i in _xrange(size):
3604 for i in _xrange(size):
3603 values.append(random.randint(0, _maxint))
3605 values.append(random.randint(0, _maxint))
3604
3606
3605 # Get mode fills the cache and tests raw lookup performance with no
3607 # Get mode fills the cache and tests raw lookup performance with no
3606 # eviction.
3608 # eviction.
3607 getseq = []
3609 getseq = []
3608 for i in _xrange(gets):
3610 for i in _xrange(gets):
3609 getseq.append(random.choice(values))
3611 getseq.append(random.choice(values))
3610
3612
3611 def dogets():
3613 def dogets():
3612 d = util.lrucachedict(size)
3614 d = util.lrucachedict(size)
3613 for v in values:
3615 for v in values:
3614 d[v] = v
3616 d[v] = v
3615 for key in getseq:
3617 for key in getseq:
3616 value = d[key]
3618 value = d[key]
3617 value # silence pyflakes warning
3619 value # silence pyflakes warning
3618
3620
3619 def dogetscost():
3621 def dogetscost():
3620 d = util.lrucachedict(size, maxcost=costlimit)
3622 d = util.lrucachedict(size, maxcost=costlimit)
3621 for i, v in enumerate(values):
3623 for i, v in enumerate(values):
3622 d.insert(v, v, cost=costs[i])
3624 d.insert(v, v, cost=costs[i])
3623 for key in getseq:
3625 for key in getseq:
3624 try:
3626 try:
3625 value = d[key]
3627 value = d[key]
3626 value # silence pyflakes warning
3628 value # silence pyflakes warning
3627 except KeyError:
3629 except KeyError:
3628 pass
3630 pass
3629
3631
3630 # Set mode tests insertion speed with cache eviction.
3632 # Set mode tests insertion speed with cache eviction.
3631 setseq = []
3633 setseq = []
3632 costs = []
3634 costs = []
3633 for i in _xrange(sets):
3635 for i in _xrange(sets):
3634 setseq.append(random.randint(0, _maxint))
3636 setseq.append(random.randint(0, _maxint))
3635 costs.append(random.choice(costrange))
3637 costs.append(random.choice(costrange))
3636
3638
3637 def doinserts():
3639 def doinserts():
3638 d = util.lrucachedict(size)
3640 d = util.lrucachedict(size)
3639 for v in setseq:
3641 for v in setseq:
3640 d.insert(v, v)
3642 d.insert(v, v)
3641
3643
3642 def doinsertscost():
3644 def doinsertscost():
3643 d = util.lrucachedict(size, maxcost=costlimit)
3645 d = util.lrucachedict(size, maxcost=costlimit)
3644 for i, v in enumerate(setseq):
3646 for i, v in enumerate(setseq):
3645 d.insert(v, v, cost=costs[i])
3647 d.insert(v, v, cost=costs[i])
3646
3648
3647 def dosets():
3649 def dosets():
3648 d = util.lrucachedict(size)
3650 d = util.lrucachedict(size)
3649 for v in setseq:
3651 for v in setseq:
3650 d[v] = v
3652 d[v] = v
3651
3653
3652 # Mixed mode randomly performs gets and sets with eviction.
3654 # Mixed mode randomly performs gets and sets with eviction.
3653 mixedops = []
3655 mixedops = []
3654 for i in _xrange(mixed):
3656 for i in _xrange(mixed):
3655 r = random.randint(0, 100)
3657 r = random.randint(0, 100)
3656 if r < mixedgetfreq:
3658 if r < mixedgetfreq:
3657 op = 0
3659 op = 0
3658 else:
3660 else:
3659 op = 1
3661 op = 1
3660
3662
3661 mixedops.append(
3663 mixedops.append(
3662 (op, random.randint(0, size * 2), random.choice(costrange))
3664 (op, random.randint(0, size * 2), random.choice(costrange))
3663 )
3665 )
3664
3666
3665 def domixed():
3667 def domixed():
3666 d = util.lrucachedict(size)
3668 d = util.lrucachedict(size)
3667
3669
3668 for op, v, cost in mixedops:
3670 for op, v, cost in mixedops:
3669 if op == 0:
3671 if op == 0:
3670 try:
3672 try:
3671 d[v]
3673 d[v]
3672 except KeyError:
3674 except KeyError:
3673 pass
3675 pass
3674 else:
3676 else:
3675 d[v] = v
3677 d[v] = v
3676
3678
3677 def domixedcost():
3679 def domixedcost():
3678 d = util.lrucachedict(size, maxcost=costlimit)
3680 d = util.lrucachedict(size, maxcost=costlimit)
3679
3681
3680 for op, v, cost in mixedops:
3682 for op, v, cost in mixedops:
3681 if op == 0:
3683 if op == 0:
3682 try:
3684 try:
3683 d[v]
3685 d[v]
3684 except KeyError:
3686 except KeyError:
3685 pass
3687 pass
3686 else:
3688 else:
3687 d.insert(v, v, cost=cost)
3689 d.insert(v, v, cost=cost)
3688
3690
3689 benches = [
3691 benches = [
3690 (doinit, b'init'),
3692 (doinit, b'init'),
3691 ]
3693 ]
3692
3694
3693 if costlimit:
3695 if costlimit:
3694 benches.extend(
3696 benches.extend(
3695 [
3697 [
3696 (dogetscost, b'gets w/ cost limit'),
3698 (dogetscost, b'gets w/ cost limit'),
3697 (doinsertscost, b'inserts w/ cost limit'),
3699 (doinsertscost, b'inserts w/ cost limit'),
3698 (domixedcost, b'mixed w/ cost limit'),
3700 (domixedcost, b'mixed w/ cost limit'),
3699 ]
3701 ]
3700 )
3702 )
3701 else:
3703 else:
3702 benches.extend(
3704 benches.extend(
3703 [
3705 [
3704 (dogets, b'gets'),
3706 (dogets, b'gets'),
3705 (doinserts, b'inserts'),
3707 (doinserts, b'inserts'),
3706 (dosets, b'sets'),
3708 (dosets, b'sets'),
3707 (domixed, b'mixed'),
3709 (domixed, b'mixed'),
3708 ]
3710 ]
3709 )
3711 )
3710
3712
3711 for fn, title in benches:
3713 for fn, title in benches:
3712 timer, fm = gettimer(ui, opts)
3714 timer, fm = gettimer(ui, opts)
3713 timer(fn, title=title)
3715 timer(fn, title=title)
3714 fm.end()
3716 fm.end()
3715
3717
3716
3718
3717 @command(b'perfwrite', formatteropts)
3719 @command(b'perfwrite', formatteropts)
3718 def perfwrite(ui, repo, **opts):
3720 def perfwrite(ui, repo, **opts):
3719 """microbenchmark ui.write
3721 """microbenchmark ui.write
3720 """
3722 """
3721 opts = _byteskwargs(opts)
3723 opts = _byteskwargs(opts)
3722
3724
3723 timer, fm = gettimer(ui, opts)
3725 timer, fm = gettimer(ui, opts)
3724
3726
3725 def write():
3727 def write():
3726 for i in range(100000):
3728 for i in range(100000):
3727 ui.writenoi18n(b'Testing write performance\n')
3729 ui.writenoi18n(b'Testing write performance\n')
3728
3730
3729 timer(write)
3731 timer(write)
3730 fm.end()
3732 fm.end()
3731
3733
3732
3734
3733 def uisetup(ui):
3735 def uisetup(ui):
3734 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3736 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3735 commands, b'debugrevlogopts'
3737 commands, b'debugrevlogopts'
3736 ):
3738 ):
3737 # for "historical portability":
3739 # for "historical portability":
3738 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3740 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3739 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3741 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3740 # openrevlog() should cause failure, because it has been
3742 # openrevlog() should cause failure, because it has been
3741 # available since 3.5 (or 49c583ca48c4).
3743 # available since 3.5 (or 49c583ca48c4).
3742 def openrevlog(orig, repo, cmd, file_, opts):
3744 def openrevlog(orig, repo, cmd, file_, opts):
3743 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3745 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3744 raise error.Abort(
3746 raise error.Abort(
3745 b"This version doesn't support --dir option",
3747 b"This version doesn't support --dir option",
3746 hint=b"use 3.5 or later",
3748 hint=b"use 3.5 or later",
3747 )
3749 )
3748 return orig(repo, cmd, file_, opts)
3750 return orig(repo, cmd, file_, opts)
3749
3751
3750 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3752 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3751
3753
3752
3754
3753 @command(
3755 @command(
3754 b'perfprogress',
3756 b'perfprogress',
3755 formatteropts
3757 formatteropts
3756 + [
3758 + [
3757 (b'', b'topic', b'topic', b'topic for progress messages'),
3759 (b'', b'topic', b'topic', b'topic for progress messages'),
3758 (b'c', b'total', 1000000, b'total value we are progressing to'),
3760 (b'c', b'total', 1000000, b'total value we are progressing to'),
3759 ],
3761 ],
3760 norepo=True,
3762 norepo=True,
3761 )
3763 )
3762 def perfprogress(ui, topic=None, total=None, **opts):
3764 def perfprogress(ui, topic=None, total=None, **opts):
3763 """printing of progress bars"""
3765 """printing of progress bars"""
3764 opts = _byteskwargs(opts)
3766 opts = _byteskwargs(opts)
3765
3767
3766 timer, fm = gettimer(ui, opts)
3768 timer, fm = gettimer(ui, opts)
3767
3769
3768 def doprogress():
3770 def doprogress():
3769 with ui.makeprogress(topic, total=total) as progress:
3771 with ui.makeprogress(topic, total=total) as progress:
3770 for i in _xrange(total):
3772 for i in _xrange(total):
3771 progress.increment()
3773 progress.increment()
3772
3774
3773 timer(doprogress)
3775 timer(doprogress)
3774 fm.end()
3776 fm.end()
@@ -1,396 +1,396 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from for <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
111 perfdirstatedirs
111 perfdirstatedirs
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 benchmap a 'dirstate._map.filefoldmap.get()' request
114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 perfdirstatewrite
115 perfdirstatewrite
116 (no help text available)
116 benchmap the time it take to write a dirstate on disk
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
175 perfstatus benchmark the performance of a single status call
175 perfstatus benchmark the performance of a single status call
176 perftags (no help text available)
176 perftags (no help text available)
177 perftemplating
177 perftemplating
178 test the rendering time of a given template
178 test the rendering time of a given template
179 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
180 perfvolatilesets
180 perfvolatilesets
181 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
182 perfwalk (no help text available)
182 perfwalk (no help text available)
183 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
184
184
185 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
186 $ hg perfaddremove
186 $ hg perfaddremove
187 $ hg perfancestors
187 $ hg perfancestors
188 $ hg perfancestorset 2
188 $ hg perfancestorset 2
189 $ hg perfannotate a
189 $ hg perfannotate a
190 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
191 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
192 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
193 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
194 $ hg perfbookmarks
194 $ hg perfbookmarks
195 $ hg perfbranchmap
195 $ hg perfbranchmap
196 $ hg perfbranchmapload
196 $ hg perfbranchmapload
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
199 $ hg perfcca
199 $ hg perfcca
200 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
201 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
202 $ hg perfchangeset 2
202 $ hg perfchangeset 2
203 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
204 $ hg perfdiffwd
204 $ hg perfdiffwd
205 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
206 $ hg perfdirs
206 $ hg perfdirs
207 $ hg perfdirstate
207 $ hg perfdirstate
208 $ hg perfdirstatedirs
208 $ hg perfdirstatedirs
209 $ hg perfdirstatefoldmap
209 $ hg perfdirstatefoldmap
210 $ hg perfdirstatewrite
210 $ hg perfdirstatewrite
211 #if repofncache
211 #if repofncache
212 $ hg perffncacheencode
212 $ hg perffncacheencode
213 $ hg perffncacheload
213 $ hg perffncacheload
214 $ hg debugrebuildfncache
214 $ hg debugrebuildfncache
215 fncache already up to date
215 fncache already up to date
216 $ hg perffncachewrite
216 $ hg perffncachewrite
217 $ hg debugrebuildfncache
217 $ hg debugrebuildfncache
218 fncache already up to date
218 fncache already up to date
219 #endif
219 #endif
220 $ hg perfheads
220 $ hg perfheads
221 $ hg perfignore
221 $ hg perfignore
222 $ hg perfindex
222 $ hg perfindex
223 $ hg perflinelogedits -n 1
223 $ hg perflinelogedits -n 1
224 $ hg perfloadmarkers
224 $ hg perfloadmarkers
225 $ hg perflog
225 $ hg perflog
226 $ hg perflookup 2
226 $ hg perflookup 2
227 $ hg perflrucache
227 $ hg perflrucache
228 $ hg perfmanifest 2
228 $ hg perfmanifest 2
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 $ hg perfmanifest -m 44fe2c8352bb
230 $ hg perfmanifest -m 44fe2c8352bb
231 abort: manifest revision must be integer or full node
231 abort: manifest revision must be integer or full node
232 [255]
232 [255]
233 $ hg perfmergecalculate -r 3
233 $ hg perfmergecalculate -r 3
234 $ hg perfmoonwalk
234 $ hg perfmoonwalk
235 $ hg perfnodelookup 2
235 $ hg perfnodelookup 2
236 $ hg perfpathcopies 1 2
236 $ hg perfpathcopies 1 2
237 $ hg perfprogress --total 1000
237 $ hg perfprogress --total 1000
238 $ hg perfrawfiles 2
238 $ hg perfrawfiles 2
239 $ hg perfrevlogindex -c
239 $ hg perfrevlogindex -c
240 #if reporevlogstore
240 #if reporevlogstore
241 $ hg perfrevlogrevisions .hg/store/data/a.i
241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 #endif
242 #endif
243 $ hg perfrevlogrevision -m 0
243 $ hg perfrevlogrevision -m 0
244 $ hg perfrevlogchunks -c
244 $ hg perfrevlogchunks -c
245 $ hg perfrevrange
245 $ hg perfrevrange
246 $ hg perfrevset 'all()'
246 $ hg perfrevset 'all()'
247 $ hg perfstartup
247 $ hg perfstartup
248 $ hg perfstatus
248 $ hg perfstatus
249 $ hg perftags
249 $ hg perftags
250 $ hg perftemplating
250 $ hg perftemplating
251 $ hg perfvolatilesets
251 $ hg perfvolatilesets
252 $ hg perfwalk
252 $ hg perfwalk
253 $ hg perfparents
253 $ hg perfparents
254 $ hg perfdiscovery -q .
254 $ hg perfdiscovery -q .
255
255
256 Test run control
256 Test run control
257 ----------------
257 ----------------
258
258
259 Simple single entry
259 Simple single entry
260
260
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 ! wall * comb * user * sys * (best of 15) (glob)
262 ! wall * comb * user * sys * (best of 15) (glob)
263
263
264 Multiple entries
264 Multiple entries
265
265
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 ! wall * comb * user * sys * (best of 5) (glob)
267 ! wall * comb * user * sys * (best of 5) (glob)
268
268
269 error cases are ignored
269 error cases are ignored
270
270
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 malformatted run limit entry, missing "-": 500
272 malformatted run limit entry, missing "-": 500
273 ! wall * comb * user * sys * (best of 5) (glob)
273 ! wall * comb * user * sys * (best of 5) (glob)
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 ! wall * comb * user * sys * (best of 5) (glob)
277 ! wall * comb * user * sys * (best of 5) (glob)
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 ! wall * comb * user * sys * (best of 5) (glob)
280 ! wall * comb * user * sys * (best of 5) (glob)
281
281
282 test actual output
282 test actual output
283 ------------------
283 ------------------
284
284
285 normal output:
285 normal output:
286
286
287 $ hg perfheads --config perf.stub=no
287 $ hg perfheads --config perf.stub=no
288 ! wall * comb * user * sys * (best of *) (glob)
288 ! wall * comb * user * sys * (best of *) (glob)
289
289
290 detailed output:
290 detailed output:
291
291
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 ! wall * comb * user * sys * (best of *) (glob)
293 ! wall * comb * user * sys * (best of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
297
297
298 test json output
298 test json output
299 ----------------
299 ----------------
300
300
301 normal output:
301 normal output:
302
302
303 $ hg perfheads --template json --config perf.stub=no
303 $ hg perfheads --template json --config perf.stub=no
304 [
304 [
305 {
305 {
306 "comb": *, (glob)
306 "comb": *, (glob)
307 "count": *, (glob)
307 "count": *, (glob)
308 "sys": *, (glob)
308 "sys": *, (glob)
309 "user": *, (glob)
309 "user": *, (glob)
310 "wall": * (glob)
310 "wall": * (glob)
311 }
311 }
312 ]
312 ]
313
313
314 detailed output:
314 detailed output:
315
315
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 [
317 [
318 {
318 {
319 "avg.comb": *, (glob)
319 "avg.comb": *, (glob)
320 "avg.count": *, (glob)
320 "avg.count": *, (glob)
321 "avg.sys": *, (glob)
321 "avg.sys": *, (glob)
322 "avg.user": *, (glob)
322 "avg.user": *, (glob)
323 "avg.wall": *, (glob)
323 "avg.wall": *, (glob)
324 "comb": *, (glob)
324 "comb": *, (glob)
325 "count": *, (glob)
325 "count": *, (glob)
326 "max.comb": *, (glob)
326 "max.comb": *, (glob)
327 "max.count": *, (glob)
327 "max.count": *, (glob)
328 "max.sys": *, (glob)
328 "max.sys": *, (glob)
329 "max.user": *, (glob)
329 "max.user": *, (glob)
330 "max.wall": *, (glob)
330 "max.wall": *, (glob)
331 "median.comb": *, (glob)
331 "median.comb": *, (glob)
332 "median.count": *, (glob)
332 "median.count": *, (glob)
333 "median.sys": *, (glob)
333 "median.sys": *, (glob)
334 "median.user": *, (glob)
334 "median.user": *, (glob)
335 "median.wall": *, (glob)
335 "median.wall": *, (glob)
336 "sys": *, (glob)
336 "sys": *, (glob)
337 "user": *, (glob)
337 "user": *, (glob)
338 "wall": * (glob)
338 "wall": * (glob)
339 }
339 }
340 ]
340 ]
341
341
342 Test pre-run feature
342 Test pre-run feature
343 --------------------
343 --------------------
344
344
345 (perf discovery has some spurious output)
345 (perf discovery has some spurious output)
346
346
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 ! wall * comb * user * sys * (best of 1) (glob)
348 ! wall * comb * user * sys * (best of 1) (glob)
349 searching for changes
349 searching for changes
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 ! wall * comb * user * sys * (best of 1) (glob)
351 ! wall * comb * user * sys * (best of 1) (glob)
352 searching for changes
352 searching for changes
353 searching for changes
353 searching for changes
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 ! wall * comb * user * sys * (best of 1) (glob)
355 ! wall * comb * user * sys * (best of 1) (glob)
356 searching for changes
356 searching for changes
357 searching for changes
357 searching for changes
358 searching for changes
358 searching for changes
359 searching for changes
359 searching for changes
360
360
361 test profile-benchmark option
361 test profile-benchmark option
362 ------------------------------
362 ------------------------------
363
363
364 Function to check that statprof ran
364 Function to check that statprof ran
365 $ statprofran () {
365 $ statprofran () {
366 > egrep 'Sample count:|No samples recorded' > /dev/null
366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 > }
367 > }
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369
369
370 Check perf.py for historical portability
370 Check perf.py for historical portability
371 ----------------------------------------
371 ----------------------------------------
372
372
373 $ cd "$TESTDIR/.."
373 $ cd "$TESTDIR/.."
374
374
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 contrib/perf.py:\d+: (re)
378 contrib/perf.py:\d+: (re)
379 > from mercurial import (
379 > from mercurial import (
380 import newer module separately in try clause for early Mercurial
380 import newer module separately in try clause for early Mercurial
381 contrib/perf.py:\d+: (re)
381 contrib/perf.py:\d+: (re)
382 > from mercurial import (
382 > from mercurial import (
383 import newer module separately in try clause for early Mercurial
383 import newer module separately in try clause for early Mercurial
384 contrib/perf.py:\d+: (re)
384 contrib/perf.py:\d+: (re)
385 > origindexpath = orig.opener.join(orig.indexfile)
385 > origindexpath = orig.opener.join(orig.indexfile)
386 use getvfs()/getsvfs() for early Mercurial
386 use getvfs()/getsvfs() for early Mercurial
387 contrib/perf.py:\d+: (re)
387 contrib/perf.py:\d+: (re)
388 > origdatapath = orig.opener.join(orig.datafile)
388 > origdatapath = orig.opener.join(orig.datafile)
389 use getvfs()/getsvfs() for early Mercurial
389 use getvfs()/getsvfs() for early Mercurial
390 contrib/perf.py:\d+: (re)
390 contrib/perf.py:\d+: (re)
391 > vfs = vfsmod.vfs(tmpdir)
391 > vfs = vfsmod.vfs(tmpdir)
392 use getvfs()/getsvfs() for early Mercurial
392 use getvfs()/getsvfs() for early Mercurial
393 contrib/perf.py:\d+: (re)
393 contrib/perf.py:\d+: (re)
394 > vfs.options = getattr(orig.opener, 'options', None)
394 > vfs.options = getattr(orig.opener, 'options', None)
395 use getvfs()/getsvfs() for early Mercurial
395 use getvfs()/getsvfs() for early Mercurial
396 [1]
396 [1]
General Comments 0
You need to be logged in to leave comments. Login now