##// END OF EJS Templates
perf: document `perfdirstatefoldmap`
marmoute -
r43395:56494a2b default
parent child Browse files
Show More
@@ -1,3762 +1,3766
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median average. If not set only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

  If the benchmark has been running for <time> seconds, and we have performed
  <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
39
39
# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions as
#   possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
123 def identity(a):
123 def identity(a):
124 return a
124 return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
177 def safehasattr(thing, attr):
177 def safehasattr(thing, attr):
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
226 def parsealiases(cmd):
226 def parsealiases(cmd):
227 return cmd.split(b"|")
227 return cmd.split(b"|")
228
228
229
229
230 if safehasattr(registrar, 'command'):
230 if safehasattr(registrar, 'command'):
231 command = registrar.command(cmdtable)
231 command = registrar.command(cmdtable)
232 elif safehasattr(cmdutil, 'command'):
232 elif safehasattr(cmdutil, 'command'):
233 command = cmdutil.command(cmdtable)
233 command = cmdutil.command(cmdtable)
234 if b'norepo' not in getargspec(command).args:
234 if b'norepo' not in getargspec(command).args:
235 # for "historical portability":
235 # for "historical portability":
236 # wrap original cmdutil.command, because "norepo" option has
236 # wrap original cmdutil.command, because "norepo" option has
237 # been available since 3.1 (or 75a96326cecb)
237 # been available since 3.1 (or 75a96326cecb)
238 _command = command
238 _command = command
239
239
240 def command(name, options=(), synopsis=None, norepo=False):
240 def command(name, options=(), synopsis=None, norepo=False):
241 if norepo:
241 if norepo:
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 return _command(name, list(options), synopsis)
243 return _command(name, list(options), synopsis)
244
244
245
245
246 else:
246 else:
247 # for "historical portability":
247 # for "historical portability":
248 # define "@command" annotation locally, because cmdutil.command
248 # define "@command" annotation locally, because cmdutil.command
249 # has been available since 1.9 (or 2daa5179e73f)
249 # has been available since 1.9 (or 2daa5179e73f)
250 def command(name, options=(), synopsis=None, norepo=False):
250 def command(name, options=(), synopsis=None, norepo=False):
251 def decorator(func):
251 def decorator(func):
252 if synopsis:
252 if synopsis:
253 cmdtable[name] = func, list(options), synopsis
253 cmdtable[name] = func, list(options), synopsis
254 else:
254 else:
255 cmdtable[name] = func, list(options)
255 cmdtable[name] = func, list(options)
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return func
258 return func
259
259
260 return decorator
260 return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'profile-benchmark',
298 b'profile-benchmark',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 )
300 )
301 configitem(
301 configitem(
302 b'perf',
302 b'perf',
303 b'run-limits',
303 b'run-limits',
304 default=mercurial.configitems.dynamicdefault,
304 default=mercurial.configitems.dynamicdefault,
305 experimental=True,
305 experimental=True,
306 )
306 )
307 except (ImportError, AttributeError):
307 except (ImportError, AttributeError):
308 pass
308 pass
309 except TypeError:
309 except TypeError:
310 # compatibility fix for a11fd395e83f
310 # compatibility fix for a11fd395e83f
311 # hg version: 5.2
311 # hg version: 5.2
312 configitem(
312 configitem(
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 )
314 )
315 configitem(
315 configitem(
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 )
320 )
321 configitem(
321 configitem(
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 )
326 )
327 configitem(
327 configitem(
328 b'perf',
328 b'perf',
329 b'profile-benchmark',
329 b'profile-benchmark',
330 default=mercurial.configitems.dynamicdefault,
330 default=mercurial.configitems.dynamicdefault,
331 )
331 )
332 configitem(
332 configitem(
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 )
334 )
335
335
336
336
337 def getlen(ui):
337 def getlen(ui):
338 if ui.configbool(b"perf", b"stub", False):
338 if ui.configbool(b"perf", b"stub", False):
339 return lambda x: 1
339 return lambda x: 1
340 return len
340 return len
341
341
342
342
343 class noop(object):
343 class noop(object):
344 """dummy context manager"""
344 """dummy context manager"""
345
345
346 def __enter__(self):
346 def __enter__(self):
347 pass
347 pass
348
348
349 def __exit__(self, *args):
349 def __exit__(self, *args):
350 pass
350 pass
351
351
352
352
353 NOOPCTX = noop()
353 NOOPCTX = noop()
354
354
355
355
356 def gettimer(ui, opts=None):
356 def gettimer(ui, opts=None):
357 """return a timer function and formatter: (timer, formatter)
357 """return a timer function and formatter: (timer, formatter)
358
358
359 This function exists to gather the creation of formatter in a single
359 This function exists to gather the creation of formatter in a single
360 place instead of duplicating it in all performance commands."""
360 place instead of duplicating it in all performance commands."""
361
361
362 # enforce an idle period before execution to counteract power management
362 # enforce an idle period before execution to counteract power management
363 # experimental config: perf.presleep
363 # experimental config: perf.presleep
364 time.sleep(getint(ui, b"perf", b"presleep", 1))
364 time.sleep(getint(ui, b"perf", b"presleep", 1))
365
365
366 if opts is None:
366 if opts is None:
367 opts = {}
367 opts = {}
368 # redirect all to stderr unless buffer api is in use
368 # redirect all to stderr unless buffer api is in use
369 if not ui._buffers:
369 if not ui._buffers:
370 ui = ui.copy()
370 ui = ui.copy()
371 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
371 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
372 if uifout:
372 if uifout:
373 # for "historical portability":
373 # for "historical portability":
374 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
374 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
375 uifout.set(ui.ferr)
375 uifout.set(ui.ferr)
376
376
377 # get a formatter
377 # get a formatter
378 uiformatter = getattr(ui, 'formatter', None)
378 uiformatter = getattr(ui, 'formatter', None)
379 if uiformatter:
379 if uiformatter:
380 fm = uiformatter(b'perf', opts)
380 fm = uiformatter(b'perf', opts)
381 else:
381 else:
382 # for "historical portability":
382 # for "historical portability":
383 # define formatter locally, because ui.formatter has been
383 # define formatter locally, because ui.formatter has been
384 # available since 2.2 (or ae5f92e154d3)
384 # available since 2.2 (or ae5f92e154d3)
385 from mercurial import node
385 from mercurial import node
386
386
387 class defaultformatter(object):
387 class defaultformatter(object):
388 """Minimized composition of baseformatter and plainformatter
388 """Minimized composition of baseformatter and plainformatter
389 """
389 """
390
390
391 def __init__(self, ui, topic, opts):
391 def __init__(self, ui, topic, opts):
392 self._ui = ui
392 self._ui = ui
393 if ui.debugflag:
393 if ui.debugflag:
394 self.hexfunc = node.hex
394 self.hexfunc = node.hex
395 else:
395 else:
396 self.hexfunc = node.short
396 self.hexfunc = node.short
397
397
398 def __nonzero__(self):
398 def __nonzero__(self):
399 return False
399 return False
400
400
401 __bool__ = __nonzero__
401 __bool__ = __nonzero__
402
402
403 def startitem(self):
403 def startitem(self):
404 pass
404 pass
405
405
406 def data(self, **data):
406 def data(self, **data):
407 pass
407 pass
408
408
409 def write(self, fields, deftext, *fielddata, **opts):
409 def write(self, fields, deftext, *fielddata, **opts):
410 self._ui.write(deftext % fielddata, **opts)
410 self._ui.write(deftext % fielddata, **opts)
411
411
412 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
412 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
413 if cond:
413 if cond:
414 self._ui.write(deftext % fielddata, **opts)
414 self._ui.write(deftext % fielddata, **opts)
415
415
416 def plain(self, text, **opts):
416 def plain(self, text, **opts):
417 self._ui.write(text, **opts)
417 self._ui.write(text, **opts)
418
418
419 def end(self):
419 def end(self):
420 pass
420 pass
421
421
422 fm = defaultformatter(ui, b'perf', opts)
422 fm = defaultformatter(ui, b'perf', opts)
423
423
424 # stub function, runs code only once instead of in a loop
424 # stub function, runs code only once instead of in a loop
425 # experimental config: perf.stub
425 # experimental config: perf.stub
426 if ui.configbool(b"perf", b"stub", False):
426 if ui.configbool(b"perf", b"stub", False):
427 return functools.partial(stub_timer, fm), fm
427 return functools.partial(stub_timer, fm), fm
428
428
429 # experimental config: perf.all-timing
429 # experimental config: perf.all-timing
430 displayall = ui.configbool(b"perf", b"all-timing", False)
430 displayall = ui.configbool(b"perf", b"all-timing", False)
431
431
432 # experimental config: perf.run-limits
432 # experimental config: perf.run-limits
433 limitspec = ui.configlist(b"perf", b"run-limits", [])
433 limitspec = ui.configlist(b"perf", b"run-limits", [])
434 limits = []
434 limits = []
435 for item in limitspec:
435 for item in limitspec:
436 parts = item.split(b'-', 1)
436 parts = item.split(b'-', 1)
437 if len(parts) < 2:
437 if len(parts) < 2:
438 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
438 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
439 continue
439 continue
440 try:
440 try:
441 time_limit = float(_sysstr(parts[0]))
441 time_limit = float(_sysstr(parts[0]))
442 except ValueError as e:
442 except ValueError as e:
443 ui.warn(
443 ui.warn(
444 (
444 (
445 b'malformatted run limit entry, %s: %s\n'
445 b'malformatted run limit entry, %s: %s\n'
446 % (_bytestr(e), item)
446 % (_bytestr(e), item)
447 )
447 )
448 )
448 )
449 continue
449 continue
450 try:
450 try:
451 run_limit = int(_sysstr(parts[1]))
451 run_limit = int(_sysstr(parts[1]))
452 except ValueError as e:
452 except ValueError as e:
453 ui.warn(
453 ui.warn(
454 (
454 (
455 b'malformatted run limit entry, %s: %s\n'
455 b'malformatted run limit entry, %s: %s\n'
456 % (_bytestr(e), item)
456 % (_bytestr(e), item)
457 )
457 )
458 )
458 )
459 continue
459 continue
460 limits.append((time_limit, run_limit))
460 limits.append((time_limit, run_limit))
461 if not limits:
461 if not limits:
462 limits = DEFAULTLIMITS
462 limits = DEFAULTLIMITS
463
463
464 profiler = None
464 profiler = None
465 if profiling is not None:
465 if profiling is not None:
466 if ui.configbool(b"perf", b"profile-benchmark", False):
466 if ui.configbool(b"perf", b"profile-benchmark", False):
467 profiler = profiling.profile(ui)
467 profiler = profiling.profile(ui)
468
468
469 prerun = getint(ui, b"perf", b"pre-run", 0)
469 prerun = getint(ui, b"perf", b"pre-run", 0)
470 t = functools.partial(
470 t = functools.partial(
471 _timer,
471 _timer,
472 fm,
472 fm,
473 displayall=displayall,
473 displayall=displayall,
474 limits=limits,
474 limits=limits,
475 prerun=prerun,
475 prerun=prerun,
476 profiler=profiler,
476 profiler=profiler,
477 )
477 )
478 return t, fm
478 return t, fm
479
479
480
480
481 def stub_timer(fm, func, setup=None, title=None):
481 def stub_timer(fm, func, setup=None, title=None):
482 if setup is not None:
482 if setup is not None:
483 setup()
483 setup()
484 func()
484 func()
485
485
486
486
487 @contextlib.contextmanager
487 @contextlib.contextmanager
488 def timeone():
488 def timeone():
489 r = []
489 r = []
490 ostart = os.times()
490 ostart = os.times()
491 cstart = util.timer()
491 cstart = util.timer()
492 yield r
492 yield r
493 cstop = util.timer()
493 cstop = util.timer()
494 ostop = os.times()
494 ostop = os.times()
495 a, b = ostart, ostop
495 a, b = ostart, ostop
496 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
496 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
497
497
498
498
499 # list of stop condition (elapsed time, minimal run count)
499 # list of stop condition (elapsed time, minimal run count)
500 DEFAULTLIMITS = (
500 DEFAULTLIMITS = (
501 (3.0, 100),
501 (3.0, 100),
502 (10.0, 3),
502 (10.0, 3),
503 )
503 )
504
504
505
505
506 def _timer(
506 def _timer(
507 fm,
507 fm,
508 func,
508 func,
509 setup=None,
509 setup=None,
510 title=None,
510 title=None,
511 displayall=False,
511 displayall=False,
512 limits=DEFAULTLIMITS,
512 limits=DEFAULTLIMITS,
513 prerun=0,
513 prerun=0,
514 profiler=None,
514 profiler=None,
515 ):
515 ):
516 gc.collect()
516 gc.collect()
517 results = []
517 results = []
518 begin = util.timer()
518 begin = util.timer()
519 count = 0
519 count = 0
520 if profiler is None:
520 if profiler is None:
521 profiler = NOOPCTX
521 profiler = NOOPCTX
522 for i in range(prerun):
522 for i in range(prerun):
523 if setup is not None:
523 if setup is not None:
524 setup()
524 setup()
525 func()
525 func()
526 keepgoing = True
526 keepgoing = True
527 while keepgoing:
527 while keepgoing:
528 if setup is not None:
528 if setup is not None:
529 setup()
529 setup()
530 with profiler:
530 with profiler:
531 with timeone() as item:
531 with timeone() as item:
532 r = func()
532 r = func()
533 profiler = NOOPCTX
533 profiler = NOOPCTX
534 count += 1
534 count += 1
535 results.append(item[0])
535 results.append(item[0])
536 cstop = util.timer()
536 cstop = util.timer()
537 # Look for a stop condition.
537 # Look for a stop condition.
538 elapsed = cstop - begin
538 elapsed = cstop - begin
539 for t, mincount in limits:
539 for t, mincount in limits:
540 if elapsed >= t and count >= mincount:
540 if elapsed >= t and count >= mincount:
541 keepgoing = False
541 keepgoing = False
542 break
542 break
543
543
544 formatone(fm, results, title=title, result=r, displayall=displayall)
544 formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
547 def formatone(fm, timings, title=None, result=None, displayall=False):
547 def formatone(fm, timings, title=None, result=None, displayall=False):
548
548
549 count = len(timings)
549 count = len(timings)
550
550
551 fm.startitem()
551 fm.startitem()
552
552
553 if title:
553 if title:
554 fm.write(b'title', b'! %s\n', title)
554 fm.write(b'title', b'! %s\n', title)
555 if result:
555 if result:
556 fm.write(b'result', b'! result: %s\n', result)
556 fm.write(b'result', b'! result: %s\n', result)
557
557
558 def display(role, entry):
558 def display(role, entry):
559 prefix = b''
559 prefix = b''
560 if role != b'best':
560 if role != b'best':
561 prefix = b'%s.' % role
561 prefix = b'%s.' % role
562 fm.plain(b'!')
562 fm.plain(b'!')
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
565 fm.write(prefix + b'user', b' user %f', entry[1])
565 fm.write(prefix + b'user', b' user %f', entry[1])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
568 fm.plain(b'\n')
568 fm.plain(b'\n')
569
569
570 timings.sort()
570 timings.sort()
571 min_val = timings[0]
571 min_val = timings[0]
572 display(b'best', min_val)
572 display(b'best', min_val)
573 if displayall:
573 if displayall:
574 max_val = timings[-1]
574 max_val = timings[-1]
575 display(b'max', max_val)
575 display(b'max', max_val)
576 avg = tuple([sum(x) / count for x in zip(*timings)])
576 avg = tuple([sum(x) / count for x in zip(*timings)])
577 display(b'avg', avg)
577 display(b'avg', avg)
578 median = timings[len(timings) // 2]
578 median = timings[len(timings) // 2]
579 display(b'median', median)
579 display(b'median', median)
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
585 def getint(ui, section, name, default):
585 def getint(ui, section, name, default):
586 # for "historical portability":
586 # for "historical portability":
587 # ui.configint has been available since 1.9 (or fa2b596db182)
587 # ui.configint has been available since 1.9 (or fa2b596db182)
588 v = ui.config(section, name, None)
588 v = ui.config(section, name, None)
589 if v is None:
589 if v is None:
590 return default
590 return default
591 try:
591 try:
592 return int(v)
592 return int(v)
593 except ValueError:
593 except ValueError:
594 raise error.ConfigError(
594 raise error.ConfigError(
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
596 )
596 )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back
    saved = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the `subsettable` mapping wherever this Mercurial defines it.

    for "historical portability": subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older "sopener" attribute otherwise (an
    # AttributeError there means the repo API is unexpectedly old).
    svfs = getattr(repo, 'svfs', None)
    return svfs or getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older "opener" attribute otherwise.
    vfs = getattr(repo, 'vfs', None)
    return vfs or getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # NOTE(review): on Python 3, vars(repo) keys are str, so the
            # bytes key b'_tagscache' may never match — confirm intent.
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property *attrname* from *obj*.

    Works on the unfiltered object when one is available, so the cache
    entry shared between repo views is the one that gets cleared.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # also forget the filecache bookkeeping entry for this attribute
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Force the changelog of *repo* to be reloaded on next access."""
    if repo is not repo.unfiltered():
        # a filtered view caches its own changelog; reset that too
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})

    def walked():
        walk = repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
        return len(list(walk))

    timer(walked)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']

    def run():
        # total file count across all status categories
        return sum(map(len, repo.status(unknown=unknown)))

    timer(run)
    fm.end()
781
781
782
782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # suppress per-file chatter while timing
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never modify the repository
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove signature takes a path-display callback
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800
800
801
801
def clearcaches(cl):
    """Drop revlog lookup caches, across historical internal APIs."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs exposed the node->rev cache attributes directly
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811
811
812
812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # drop lookup caches so each run starts cold
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
828
828
829
829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        # always start each run with an empty tags cache
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
852
852
853
853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over the ancestors of all changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        # exhaust the ancestor iterator; the work is the traversal itself
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
866
866
867
867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestry membership tests for each revision in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        # build a fresh lazy ancestor set, then probe it for every rev
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(run)
    fm.end()
882
882
883
883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Every other perf command converts the str-keyed **opts to bytes
    # keys before handing them to gettimer()/core APIs; without this,
    # bytes-keyed lookups (e.g. the formatter template) silently miss.
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run, so no discovery state is carried over
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
900
900
901
901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
924
924
925
925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time opening + handing the parsed bundle to *fn*
        def run():
            with open(bundlepath, b'rb') as fp:
                bundle = exchange.readbundle(ui, fp, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time draining the bundle in *size*-byte reads
        def run():
            with open(bundlepath, b'rb') as fp:
                bundle = exchange.readbundle(ui, fp, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads, no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fp:
                while fp.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fp:
                bundle = exchange.readbundle(ui, fp, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines apply to every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle format once to pick the format-specific benchmarks
    with open(bundlepath, b'rb') as fp:
        bundle = exchange.readbundle(ui, fp, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1050
1050
1051
1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the chunk generator; generating is the measured work
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
1087
1087
1088
1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call, rebuilding the dirs cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing

    def run():
        dirstate.hasdir(b'a')
        # drop the dirs cache so the next run rebuilds it
        del dirstate._map._dirs

    timer(run)
    fm.end()
1102
1102
1103
1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark the time necessary to load a dirstate from scratch

    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # load once so invalidate() has data to discard

    def setup():
        repo.dirstate.invalidate()

    def run():
        b"a" in repo.dirstate

    timer(run, setup=setup)
    fm.end()
1123
1123
1124
1124
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")  # make sure the dirstate itself is loaded

    def setup():
        # start every run without the dirs cache
        del repo.dirstate._map._dirs

    def run():
        repo.dirstate.hasdir(b"a")

    timer(run, setup=setup)
    fm.end()
1141
1141
1142
1142
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded before timing

    def run():
        dirstate._map.filefoldmap.get(b'a')
        # drop the fold map so the next run recomputes it
        del dirstate._map.filefoldmap

    timer(run)
    fm.end()
1156
1160
1157
1161
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache (and the dirs cache it is built from)
    is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded before timing

    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both caches so the next run recomputes them from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(d)
    fm.end()
1172
1176
1173
1177
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it take to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # make sure the dirstate is loaded before timing

    def d():
        # mark dirty so write() actually serializes even with no changes
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(d)
    fm.end()
1187
1191
1188
1192
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1210
1214
1211
1215
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between the selected revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1243
1247
1244
1248
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1267
1271
1268
1272
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1282
1286
1283
1287
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so the phase data is
            # re-read from disk on each run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1306
1310
1307
1311
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots via the 'phases' pushkey namespace
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count the remote non-public roots that are known locally
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1364
1368
1365
1369
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # argument is a changeset revision: derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # 40 hex digits: a full manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() does not exist on older Mercurial versions;
                # fall back to the private _revlog attribute there
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1409
1413
1410
1414
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1423
1427
1424
1428
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop any cached ignore state so each run rebuilds it from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # accessing the property triggers (and times) the ignore computation
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1441
1445
1442
1446
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs() option keys are bytes; the previous str
        # key (opts['rev']) and str Abort message would break on Python 3
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # building the changelog (index creation) happens here, then the
        # requested nodes are looked up
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1504
1508
1505
1509
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: after _byteskwargs() option keys are bytes; the previous str keys
    # (opts['clear_caches']) and str Abort message would break on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure

    timer(d, setup=setup)
    fm.end()
1572
1576
1573
1577
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time to start a fresh `hg` process (`hg version -q`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # spawn the same hg executable with an empty HGRCPATH so config
        # loading does not skew the measurement; output is discarded
        if os.name != r'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1590
1594
1591
1595
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node ids outside the timed section
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1617
1621
1618
1622
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by changeset `x` (context API)"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
1630
1634
1631
1635
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list of changeset `x` straight from the
    changelog (compare with perfctxfiles, which goes through the context)"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # field 3 of the parsed changelog entry is the list of touched files
        len(cl.read(x)[3])

    timer(d)
    fm.end()
1644
1648
1645
1649
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node (`repo.lookup`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1652
1656
1653
1657
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a series of random edits to a fresh linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the same pseudo-random edit sequence on every invocation
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-generate the (rev, a1, a2, b1, b2) hunks outside the timed section
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        # track the running line count so later hunks stay in range
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1691
1695
1692
1696
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs (`scmutil.revrange`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1700
1704
1701
1705
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in an uncached changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing repo-level caching
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(n)
        # clear the revlog caches so the next run starts cold
        clearcaches(cl)

    timer(d)
    fm.end()
1718
1722
1719
1723
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture log output in a buffer so terminal I/O is not measured
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1737
1741
1738
1742
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # iterate from the last revision down to 0
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1755
1759
1756
1760
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to /dev/null so terminal I/O is
    # not part of the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1796
1800
1797
1801
def _displaystats(ui, opts, entries, data):
    """display percentile statistics about gathered benchmark data

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each such
    ``key`` to a list of tuples whose first element is the measured value
    (the remaining elements identify the measurement, e.g. node hashes).
    Each list is sorted in place.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be derived from the number of measured
        # values; the previous code used len(data) (the number of stat
        # categories), which made every percentile index the first few
        # entries regardless of dataset size
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1843
1847
1844
1848
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style cell format keyed into `data`)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing, all timing/rename columns are unavailable
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # grab the end timestamp *before* computing the duration;
                # the previous code computed `end - begin` using the stale
                # `end` left over from the p1 measurement, producing bogus
                # (possibly negative) p2 timings
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2026
2030
2027
2031
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: the mutable default `revs=[]` is safe here because the body only
    # rebinds `revs`, never mutates it in place
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        # with --timing, two extra columns: rename count and elapsed time
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw measurements, later summarized by _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits are considered: copy tracing cost matters there
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (common-ancestor base -> parent) pair independently
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace for this pair, skip it entirely
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2153
2157
2154
2158
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark the cost of constructing a case collision auditor over the
    # current dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2161
2165
2162
2166
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark loading the fncache from disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2174
2178
2175
2179
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache to disk

    The fncache is flagged dirty and rewritten through a throwaway
    transaction on each timed run; the transaction backs up the original
    fncache first.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # release the repository lock even if the transaction setup or the
    # timed runs raise, so an aborted benchmark does not leave the
    # repository locked
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2194
2198
2195
2199
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark running the store path encoding over every fncache entry
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for entry in store.fncache.entries:
            store.encode(entry)

    timer(encodeall)
    fm.end()
2209
2213
2210
2214
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker thread body for the threaded variant of perfbdiff

    Pulls text pairs from ``q`` and diffs them; a ``None`` item marks the
    end of a batch, after which the worker parks on the ``ready`` condition
    until the driver wakes it for the next round (or ``done`` is set).
    """
    while not done.is_set():
        while True:
            pair = q.get()
            if pair is None:
                q.task_done()  # account for the sentinel itself
                break
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
        with ready:
            ready.wait()
2226
2230
2227
2231
def _manifestrevision(repo, mnode):
    """return the stored revision text for manifest node ``mnode``"""
    ml = repo.manifestlog
    # newer manifestlog objects expose getstorage(); older ones only have
    # the private _revlog attribute
    if not util.safehasattr(ml, b'getstorage'):
        store = ml._revlog
    else:
        store = ml.getstorage(b'')
    return store.revision(mnode)
2237
2241
2238
2242
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata walks the changeset, so force changelog mode
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # in -c/-m mode the first positional argument is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old text, new text) pairs gathered up-front so the timed section
    # measures only the diffing itself
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the bound is len(r) - 1, so the last revision of the
    # revlog is never included even with a large --count -- confirm whether
    # that off-by-one is intended
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # fall back to -1 (null) when one side of the change is absent
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline with the selected
            # algorithm
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        # prime the queue with one sentinel per worker so each worker
        # finishes an empty first batch and parks on `ready` before the
        # timing starts (see _bdiffworker)
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs, then one end-of-batch sentinel per worker,
            # wake the parked workers and wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: flag termination, unblock any pending
        # q.get() with sentinels, and wake threads waiting on `ready`
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2353
2357
2354
2358
2355 @command(
2359 @command(
2356 b'perfunidiff',
2360 b'perfunidiff',
2357 revlogopts
2361 revlogopts
2358 + formatteropts
2362 + formatteropts
2359 + [
2363 + [
2360 (
2364 (
2361 b'',
2365 b'',
2362 b'count',
2366 b'count',
2363 1,
2367 1,
2364 b'number of revisions to test (when using --startrev)',
2368 b'number of revisions to test (when using --startrev)',
2365 ),
2369 ),
2366 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2370 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2367 ],
2371 ],
2368 b'-c|-m|FILE REV',
2372 b'-c|-m|FILE REV',
2369 )
2373 )
2370 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2374 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2371 """benchmark a unified diff between revisions
2375 """benchmark a unified diff between revisions
2372
2376
2373 This doesn't include any copy tracing - it's just a unified diff
2377 This doesn't include any copy tracing - it's just a unified diff
2374 of the texts.
2378 of the texts.
2375
2379
2376 By default, benchmark a diff between its delta parent and itself.
2380 By default, benchmark a diff between its delta parent and itself.
2377
2381
2378 With ``--count``, benchmark diffs between delta parents and self for N
2382 With ``--count``, benchmark diffs between delta parents and self for N
2379 revisions starting at the specified revision.
2383 revisions starting at the specified revision.
2380
2384
2381 With ``--alldata``, assume the requested revision is a changeset and
2385 With ``--alldata``, assume the requested revision is a changeset and
2382 measure diffs for all changes related to that changeset (manifest
2386 measure diffs for all changes related to that changeset (manifest
2383 and filelogs).
2387 and filelogs).
2384 """
2388 """
2385 opts = _byteskwargs(opts)
2389 opts = _byteskwargs(opts)
2386 if opts[b'alldata']:
2390 if opts[b'alldata']:
2387 opts[b'changelog'] = True
2391 opts[b'changelog'] = True
2388
2392
2389 if opts.get(b'changelog') or opts.get(b'manifest'):
2393 if opts.get(b'changelog') or opts.get(b'manifest'):
2390 file_, rev = None, file_
2394 file_, rev = None, file_
2391 elif rev is None:
2395 elif rev is None:
2392 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2396 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2393
2397
2394 textpairs = []
2398 textpairs = []
2395
2399
2396 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2400 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2397
2401
2398 startrev = r.rev(r.lookup(rev))
2402 startrev = r.rev(r.lookup(rev))
2399 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2403 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2400 if opts[b'alldata']:
2404 if opts[b'alldata']:
2401 # Load revisions associated with changeset.
2405 # Load revisions associated with changeset.
2402 ctx = repo[rev]
2406 ctx = repo[rev]
2403 mtext = _manifestrevision(repo, ctx.manifestnode())
2407 mtext = _manifestrevision(repo, ctx.manifestnode())
2404 for pctx in ctx.parents():
2408 for pctx in ctx.parents():
2405 pman = _manifestrevision(repo, pctx.manifestnode())
2409 pman = _manifestrevision(repo, pctx.manifestnode())
2406 textpairs.append((pman, mtext))
2410 textpairs.append((pman, mtext))
2407
2411
2408 # Load filelog revisions by iterating manifest delta.
2412 # Load filelog revisions by iterating manifest delta.
2409 man = ctx.manifest()
2413 man = ctx.manifest()
2410 pman = ctx.p1().manifest()
2414 pman = ctx.p1().manifest()
2411 for filename, change in pman.diff(man).items():
2415 for filename, change in pman.diff(man).items():
2412 fctx = repo.file(filename)
2416 fctx = repo.file(filename)
2413 f1 = fctx.revision(change[0][0] or -1)
2417 f1 = fctx.revision(change[0][0] or -1)
2414 f2 = fctx.revision(change[1][0] or -1)
2418 f2 = fctx.revision(change[1][0] or -1)
2415 textpairs.append((f1, f2))
2419 textpairs.append((f1, f2))
2416 else:
2420 else:
2417 dp = r.deltaparent(rev)
2421 dp = r.deltaparent(rev)
2418 textpairs.append((r.revision(dp), r.revision(rev)))
2422 textpairs.append((r.revision(dp), r.revision(rev)))
2419
2423
2420 def d():
2424 def d():
2421 for left, right in textpairs:
2425 for left, right in textpairs:
2422 # The date strings don't matter, so we pass empty strings.
2426 # The date strings don't matter, so we pass empty strings.
2423 headerlines, hunks = mdiff.unidiff(
2427 headerlines, hunks = mdiff.unidiff(
2424 left, b'', right, b'', b'left', b'right', binary=False
2428 left, b'', right, b'', b'left', b'right', binary=False
2425 )
2429 )
2426 # consume iterators in roughly the way patch.py does
2430 # consume iterators in roughly the way patch.py does
2427 b'\n'.join(headerlines)
2431 b'\n'.join(headerlines)
2428 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2432 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2429
2433
2430 timer, fm = gettimer(ui, opts)
2434 timer, fm = gettimer(ui, opts)
2431 timer(d)
2435 timer(d)
2432 fm.end()
2436 fm.end()
2433
2437
2434
2438
2435 @command(b'perfdiffwd', formatteropts)
2439 @command(b'perfdiffwd', formatteropts)
2436 def perfdiffwd(ui, repo, **opts):
2440 def perfdiffwd(ui, repo, **opts):
2437 """Profile diff of working directory changes"""
2441 """Profile diff of working directory changes"""
2438 opts = _byteskwargs(opts)
2442 opts = _byteskwargs(opts)
2439 timer, fm = gettimer(ui, opts)
2443 timer, fm = gettimer(ui, opts)
2440 options = {
2444 options = {
2441 'w': 'ignore_all_space',
2445 'w': 'ignore_all_space',
2442 'b': 'ignore_space_change',
2446 'b': 'ignore_space_change',
2443 'B': 'ignore_blank_lines',
2447 'B': 'ignore_blank_lines',
2444 }
2448 }
2445
2449
2446 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2450 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2447 opts = dict((options[c], b'1') for c in diffopt)
2451 opts = dict((options[c], b'1') for c in diffopt)
2448
2452
2449 def d():
2453 def d():
2450 ui.pushbuffer()
2454 ui.pushbuffer()
2451 commands.diff(ui, repo, **opts)
2455 commands.diff(ui, repo, **opts)
2452 ui.popbuffer()
2456 ui.popbuffer()
2453
2457
2454 diffopt = diffopt.encode('ascii')
2458 diffopt = diffopt.encode('ascii')
2455 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2459 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2456 timer(d, title=title)
2460 timer(d, title=title)
2457 fm.end()
2461 fm.end()
2458
2462
2459
2463
2460 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2464 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2461 def perfrevlogindex(ui, repo, file_=None, **opts):
2465 def perfrevlogindex(ui, repo, file_=None, **opts):
2462 """Benchmark operations against a revlog index.
2466 """Benchmark operations against a revlog index.
2463
2467
2464 This tests constructing a revlog instance, reading index data,
2468 This tests constructing a revlog instance, reading index data,
2465 parsing index data, and performing various operations related to
2469 parsing index data, and performing various operations related to
2466 index data.
2470 index data.
2467 """
2471 """
2468
2472
2469 opts = _byteskwargs(opts)
2473 opts = _byteskwargs(opts)
2470
2474
2471 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2475 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2472
2476
2473 opener = getattr(rl, 'opener') # trick linter
2477 opener = getattr(rl, 'opener') # trick linter
2474 indexfile = rl.indexfile
2478 indexfile = rl.indexfile
2475 data = opener.read(indexfile)
2479 data = opener.read(indexfile)
2476
2480
2477 header = struct.unpack(b'>I', data[0:4])[0]
2481 header = struct.unpack(b'>I', data[0:4])[0]
2478 version = header & 0xFFFF
2482 version = header & 0xFFFF
2479 if version == 1:
2483 if version == 1:
2480 revlogio = revlog.revlogio()
2484 revlogio = revlog.revlogio()
2481 inline = header & (1 << 16)
2485 inline = header & (1 << 16)
2482 else:
2486 else:
2483 raise error.Abort(b'unsupported revlog version: %d' % version)
2487 raise error.Abort(b'unsupported revlog version: %d' % version)
2484
2488
2485 rllen = len(rl)
2489 rllen = len(rl)
2486
2490
2487 node0 = rl.node(0)
2491 node0 = rl.node(0)
2488 node25 = rl.node(rllen // 4)
2492 node25 = rl.node(rllen // 4)
2489 node50 = rl.node(rllen // 2)
2493 node50 = rl.node(rllen // 2)
2490 node75 = rl.node(rllen // 4 * 3)
2494 node75 = rl.node(rllen // 4 * 3)
2491 node100 = rl.node(rllen - 1)
2495 node100 = rl.node(rllen - 1)
2492
2496
2493 allrevs = range(rllen)
2497 allrevs = range(rllen)
2494 allrevsrev = list(reversed(allrevs))
2498 allrevsrev = list(reversed(allrevs))
2495 allnodes = [rl.node(rev) for rev in range(rllen)]
2499 allnodes = [rl.node(rev) for rev in range(rllen)]
2496 allnodesrev = list(reversed(allnodes))
2500 allnodesrev = list(reversed(allnodes))
2497
2501
2498 def constructor():
2502 def constructor():
2499 revlog.revlog(opener, indexfile)
2503 revlog.revlog(opener, indexfile)
2500
2504
2501 def read():
2505 def read():
2502 with opener(indexfile) as fh:
2506 with opener(indexfile) as fh:
2503 fh.read()
2507 fh.read()
2504
2508
2505 def parseindex():
2509 def parseindex():
2506 revlogio.parseindex(data, inline)
2510 revlogio.parseindex(data, inline)
2507
2511
2508 def getentry(revornode):
2512 def getentry(revornode):
2509 index = revlogio.parseindex(data, inline)[0]
2513 index = revlogio.parseindex(data, inline)[0]
2510 index[revornode]
2514 index[revornode]
2511
2515
2512 def getentries(revs, count=1):
2516 def getentries(revs, count=1):
2513 index = revlogio.parseindex(data, inline)[0]
2517 index = revlogio.parseindex(data, inline)[0]
2514
2518
2515 for i in range(count):
2519 for i in range(count):
2516 for rev in revs:
2520 for rev in revs:
2517 index[rev]
2521 index[rev]
2518
2522
2519 def resolvenode(node):
2523 def resolvenode(node):
2520 nodemap = revlogio.parseindex(data, inline)[1]
2524 nodemap = revlogio.parseindex(data, inline)[1]
2521 # This only works for the C code.
2525 # This only works for the C code.
2522 if nodemap is None:
2526 if nodemap is None:
2523 return
2527 return
2524
2528
2525 try:
2529 try:
2526 nodemap[node]
2530 nodemap[node]
2527 except error.RevlogError:
2531 except error.RevlogError:
2528 pass
2532 pass
2529
2533
2530 def resolvenodes(nodes, count=1):
2534 def resolvenodes(nodes, count=1):
2531 nodemap = revlogio.parseindex(data, inline)[1]
2535 nodemap = revlogio.parseindex(data, inline)[1]
2532 if nodemap is None:
2536 if nodemap is None:
2533 return
2537 return
2534
2538
2535 for i in range(count):
2539 for i in range(count):
2536 for node in nodes:
2540 for node in nodes:
2537 try:
2541 try:
2538 nodemap[node]
2542 nodemap[node]
2539 except error.RevlogError:
2543 except error.RevlogError:
2540 pass
2544 pass
2541
2545
2542 benches = [
2546 benches = [
2543 (constructor, b'revlog constructor'),
2547 (constructor, b'revlog constructor'),
2544 (read, b'read'),
2548 (read, b'read'),
2545 (parseindex, b'create index object'),
2549 (parseindex, b'create index object'),
2546 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2550 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2547 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2551 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2548 (lambda: resolvenode(node0), b'look up node at rev 0'),
2552 (lambda: resolvenode(node0), b'look up node at rev 0'),
2549 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2553 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2550 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2554 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2551 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2555 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2552 (lambda: resolvenode(node100), b'look up node at tip'),
2556 (lambda: resolvenode(node100), b'look up node at tip'),
2553 # 2x variation is to measure caching impact.
2557 # 2x variation is to measure caching impact.
2554 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2558 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2555 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2559 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2556 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2560 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2557 (
2561 (
2558 lambda: resolvenodes(allnodesrev, 2),
2562 lambda: resolvenodes(allnodesrev, 2),
2559 b'look up all nodes 2x (reverse)',
2563 b'look up all nodes 2x (reverse)',
2560 ),
2564 ),
2561 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2565 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2562 (
2566 (
2563 lambda: getentries(allrevs, 2),
2567 lambda: getentries(allrevs, 2),
2564 b'retrieve all index entries 2x (forward)',
2568 b'retrieve all index entries 2x (forward)',
2565 ),
2569 ),
2566 (
2570 (
2567 lambda: getentries(allrevsrev),
2571 lambda: getentries(allrevsrev),
2568 b'retrieve all index entries (reverse)',
2572 b'retrieve all index entries (reverse)',
2569 ),
2573 ),
2570 (
2574 (
2571 lambda: getentries(allrevsrev, 2),
2575 lambda: getentries(allrevsrev, 2),
2572 b'retrieve all index entries 2x (reverse)',
2576 b'retrieve all index entries 2x (reverse)',
2573 ),
2577 ),
2574 ]
2578 ]
2575
2579
2576 for fn, title in benches:
2580 for fn, title in benches:
2577 timer, fm = gettimer(ui, opts)
2581 timer, fm = gettimer(ui, opts)
2578 timer(fn, title=title)
2582 timer(fn, title=title)
2579 fm.end()
2583 fm.end()
2580
2584
2581
2585
2582 @command(
2586 @command(
2583 b'perfrevlogrevisions',
2587 b'perfrevlogrevisions',
2584 revlogopts
2588 revlogopts
2585 + formatteropts
2589 + formatteropts
2586 + [
2590 + [
2587 (b'd', b'dist', 100, b'distance between the revisions'),
2591 (b'd', b'dist', 100, b'distance between the revisions'),
2588 (b's', b'startrev', 0, b'revision to start reading at'),
2592 (b's', b'startrev', 0, b'revision to start reading at'),
2589 (b'', b'reverse', False, b'read in reverse'),
2593 (b'', b'reverse', False, b'read in reverse'),
2590 ],
2594 ],
2591 b'-c|-m|FILE',
2595 b'-c|-m|FILE',
2592 )
2596 )
2593 def perfrevlogrevisions(
2597 def perfrevlogrevisions(
2594 ui, repo, file_=None, startrev=0, reverse=False, **opts
2598 ui, repo, file_=None, startrev=0, reverse=False, **opts
2595 ):
2599 ):
2596 """Benchmark reading a series of revisions from a revlog.
2600 """Benchmark reading a series of revisions from a revlog.
2597
2601
2598 By default, we read every ``-d/--dist`` revision from 0 to tip of
2602 By default, we read every ``-d/--dist`` revision from 0 to tip of
2599 the specified revlog.
2603 the specified revlog.
2600
2604
2601 The start revision can be defined via ``-s/--startrev``.
2605 The start revision can be defined via ``-s/--startrev``.
2602 """
2606 """
2603 opts = _byteskwargs(opts)
2607 opts = _byteskwargs(opts)
2604
2608
2605 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2609 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2606 rllen = getlen(ui)(rl)
2610 rllen = getlen(ui)(rl)
2607
2611
2608 if startrev < 0:
2612 if startrev < 0:
2609 startrev = rllen + startrev
2613 startrev = rllen + startrev
2610
2614
2611 def d():
2615 def d():
2612 rl.clearcaches()
2616 rl.clearcaches()
2613
2617
2614 beginrev = startrev
2618 beginrev = startrev
2615 endrev = rllen
2619 endrev = rllen
2616 dist = opts[b'dist']
2620 dist = opts[b'dist']
2617
2621
2618 if reverse:
2622 if reverse:
2619 beginrev, endrev = endrev - 1, beginrev - 1
2623 beginrev, endrev = endrev - 1, beginrev - 1
2620 dist = -1 * dist
2624 dist = -1 * dist
2621
2625
2622 for x in _xrange(beginrev, endrev, dist):
2626 for x in _xrange(beginrev, endrev, dist):
2623 # Old revisions don't support passing int.
2627 # Old revisions don't support passing int.
2624 n = rl.node(x)
2628 n = rl.node(x)
2625 rl.revision(n)
2629 rl.revision(n)
2626
2630
2627 timer, fm = gettimer(ui, opts)
2631 timer, fm = gettimer(ui, opts)
2628 timer(d)
2632 timer(d)
2629 fm.end()
2633 fm.end()
2630
2634
2631
2635
2632 @command(
2636 @command(
2633 b'perfrevlogwrite',
2637 b'perfrevlogwrite',
2634 revlogopts
2638 revlogopts
2635 + formatteropts
2639 + formatteropts
2636 + [
2640 + [
2637 (b's', b'startrev', 1000, b'revision to start writing at'),
2641 (b's', b'startrev', 1000, b'revision to start writing at'),
2638 (b'', b'stoprev', -1, b'last revision to write'),
2642 (b'', b'stoprev', -1, b'last revision to write'),
2639 (b'', b'count', 3, b'number of passes to perform'),
2643 (b'', b'count', 3, b'number of passes to perform'),
2640 (b'', b'details', False, b'print timing for every revisions tested'),
2644 (b'', b'details', False, b'print timing for every revisions tested'),
2641 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2645 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2642 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2646 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2643 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2647 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2644 ],
2648 ],
2645 b'-c|-m|FILE',
2649 b'-c|-m|FILE',
2646 )
2650 )
2647 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2651 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2648 """Benchmark writing a series of revisions to a revlog.
2652 """Benchmark writing a series of revisions to a revlog.
2649
2653
2650 Possible source values are:
2654 Possible source values are:
2651 * `full`: add from a full text (default).
2655 * `full`: add from a full text (default).
2652 * `parent-1`: add from a delta to the first parent
2656 * `parent-1`: add from a delta to the first parent
2653 * `parent-2`: add from a delta to the second parent if it exists
2657 * `parent-2`: add from a delta to the second parent if it exists
2654 (use a delta from the first parent otherwise)
2658 (use a delta from the first parent otherwise)
2655 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2659 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2656 * `storage`: add from the existing precomputed deltas
2660 * `storage`: add from the existing precomputed deltas
2657
2661
2658 Note: This performance command measures performance in a custom way. As a
2662 Note: This performance command measures performance in a custom way. As a
2659 result some of the global configuration of the 'perf' command does not
2663 result some of the global configuration of the 'perf' command does not
2660 apply to it:
2664 apply to it:
2661
2665
2662 * ``pre-run``: disabled
2666 * ``pre-run``: disabled
2663
2667
2664 * ``profile-benchmark``: disabled
2668 * ``profile-benchmark``: disabled
2665
2669
2666 * ``run-limits``: disabled use --count instead
2670 * ``run-limits``: disabled use --count instead
2667 """
2671 """
2668 opts = _byteskwargs(opts)
2672 opts = _byteskwargs(opts)
2669
2673
2670 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2674 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2671 rllen = getlen(ui)(rl)
2675 rllen = getlen(ui)(rl)
2672 if startrev < 0:
2676 if startrev < 0:
2673 startrev = rllen + startrev
2677 startrev = rllen + startrev
2674 if stoprev < 0:
2678 if stoprev < 0:
2675 stoprev = rllen + stoprev
2679 stoprev = rllen + stoprev
2676
2680
2677 lazydeltabase = opts['lazydeltabase']
2681 lazydeltabase = opts['lazydeltabase']
2678 source = opts['source']
2682 source = opts['source']
2679 clearcaches = opts['clear_caches']
2683 clearcaches = opts['clear_caches']
2680 validsource = (
2684 validsource = (
2681 b'full',
2685 b'full',
2682 b'parent-1',
2686 b'parent-1',
2683 b'parent-2',
2687 b'parent-2',
2684 b'parent-smallest',
2688 b'parent-smallest',
2685 b'storage',
2689 b'storage',
2686 )
2690 )
2687 if source not in validsource:
2691 if source not in validsource:
2688 raise error.Abort('invalid source type: %s' % source)
2692 raise error.Abort('invalid source type: %s' % source)
2689
2693
2690 ### actually gather results
2694 ### actually gather results
2691 count = opts['count']
2695 count = opts['count']
2692 if count <= 0:
2696 if count <= 0:
2693 raise error.Abort('invalide run count: %d' % count)
2697 raise error.Abort('invalide run count: %d' % count)
2694 allresults = []
2698 allresults = []
2695 for c in range(count):
2699 for c in range(count):
2696 timing = _timeonewrite(
2700 timing = _timeonewrite(
2697 ui,
2701 ui,
2698 rl,
2702 rl,
2699 source,
2703 source,
2700 startrev,
2704 startrev,
2701 stoprev,
2705 stoprev,
2702 c + 1,
2706 c + 1,
2703 lazydeltabase=lazydeltabase,
2707 lazydeltabase=lazydeltabase,
2704 clearcaches=clearcaches,
2708 clearcaches=clearcaches,
2705 )
2709 )
2706 allresults.append(timing)
2710 allresults.append(timing)
2707
2711
2708 ### consolidate the results in a single list
2712 ### consolidate the results in a single list
2709 results = []
2713 results = []
2710 for idx, (rev, t) in enumerate(allresults[0]):
2714 for idx, (rev, t) in enumerate(allresults[0]):
2711 ts = [t]
2715 ts = [t]
2712 for other in allresults[1:]:
2716 for other in allresults[1:]:
2713 orev, ot = other[idx]
2717 orev, ot = other[idx]
2714 assert orev == rev
2718 assert orev == rev
2715 ts.append(ot)
2719 ts.append(ot)
2716 results.append((rev, ts))
2720 results.append((rev, ts))
2717 resultcount = len(results)
2721 resultcount = len(results)
2718
2722
2719 ### Compute and display relevant statistics
2723 ### Compute and display relevant statistics
2720
2724
2721 # get a formatter
2725 # get a formatter
2722 fm = ui.formatter(b'perf', opts)
2726 fm = ui.formatter(b'perf', opts)
2723 displayall = ui.configbool(b"perf", b"all-timing", False)
2727 displayall = ui.configbool(b"perf", b"all-timing", False)
2724
2728
2725 # print individual details if requested
2729 # print individual details if requested
2726 if opts['details']:
2730 if opts['details']:
2727 for idx, item in enumerate(results, 1):
2731 for idx, item in enumerate(results, 1):
2728 rev, data = item
2732 rev, data = item
2729 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2733 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2730 formatone(fm, data, title=title, displayall=displayall)
2734 formatone(fm, data, title=title, displayall=displayall)
2731
2735
2732 # sorts results by median time
2736 # sorts results by median time
2733 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2737 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2734 # list of (name, index) to display)
2738 # list of (name, index) to display)
2735 relevants = [
2739 relevants = [
2736 ("min", 0),
2740 ("min", 0),
2737 ("10%", resultcount * 10 // 100),
2741 ("10%", resultcount * 10 // 100),
2738 ("25%", resultcount * 25 // 100),
2742 ("25%", resultcount * 25 // 100),
2739 ("50%", resultcount * 70 // 100),
2743 ("50%", resultcount * 70 // 100),
2740 ("75%", resultcount * 75 // 100),
2744 ("75%", resultcount * 75 // 100),
2741 ("90%", resultcount * 90 // 100),
2745 ("90%", resultcount * 90 // 100),
2742 ("95%", resultcount * 95 // 100),
2746 ("95%", resultcount * 95 // 100),
2743 ("99%", resultcount * 99 // 100),
2747 ("99%", resultcount * 99 // 100),
2744 ("99.9%", resultcount * 999 // 1000),
2748 ("99.9%", resultcount * 999 // 1000),
2745 ("99.99%", resultcount * 9999 // 10000),
2749 ("99.99%", resultcount * 9999 // 10000),
2746 ("99.999%", resultcount * 99999 // 100000),
2750 ("99.999%", resultcount * 99999 // 100000),
2747 ("max", -1),
2751 ("max", -1),
2748 ]
2752 ]
2749 if not ui.quiet:
2753 if not ui.quiet:
2750 for name, idx in relevants:
2754 for name, idx in relevants:
2751 data = results[idx]
2755 data = results[idx]
2752 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2756 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2753 formatone(fm, data[1], title=title, displayall=displayall)
2757 formatone(fm, data[1], title=title, displayall=displayall)
2754
2758
2755 # XXX summing that many float will not be very precise, we ignore this fact
2759 # XXX summing that many float will not be very precise, we ignore this fact
2756 # for now
2760 # for now
2757 totaltime = []
2761 totaltime = []
2758 for item in allresults:
2762 for item in allresults:
2759 totaltime.append(
2763 totaltime.append(
2760 (
2764 (
2761 sum(x[1][0] for x in item),
2765 sum(x[1][0] for x in item),
2762 sum(x[1][1] for x in item),
2766 sum(x[1][1] for x in item),
2763 sum(x[1][2] for x in item),
2767 sum(x[1][2] for x in item),
2764 )
2768 )
2765 )
2769 )
2766 formatone(
2770 formatone(
2767 fm,
2771 fm,
2768 totaltime,
2772 totaltime,
2769 title="total time (%d revs)" % resultcount,
2773 title="total time (%d revs)" % resultcount,
2770 displayall=displayall,
2774 displayall=displayall,
2771 )
2775 )
2772 fm.end()
2776 fm.end()
2773
2777
2774
2778
2775 class _faketr(object):
2779 class _faketr(object):
2776 def add(s, x, y, z=None):
2780 def add(s, x, y, z=None):
2777 return None
2781 return None
2778
2782
2779
2783
2780 def _timeonewrite(
2784 def _timeonewrite(
2781 ui,
2785 ui,
2782 orig,
2786 orig,
2783 source,
2787 source,
2784 startrev,
2788 startrev,
2785 stoprev,
2789 stoprev,
2786 runidx=None,
2790 runidx=None,
2787 lazydeltabase=True,
2791 lazydeltabase=True,
2788 clearcaches=True,
2792 clearcaches=True,
2789 ):
2793 ):
2790 timings = []
2794 timings = []
2791 tr = _faketr()
2795 tr = _faketr()
2792 with _temprevlog(ui, orig, startrev) as dest:
2796 with _temprevlog(ui, orig, startrev) as dest:
2793 dest._lazydeltabase = lazydeltabase
2797 dest._lazydeltabase = lazydeltabase
2794 revs = list(orig.revs(startrev, stoprev))
2798 revs = list(orig.revs(startrev, stoprev))
2795 total = len(revs)
2799 total = len(revs)
2796 topic = 'adding'
2800 topic = 'adding'
2797 if runidx is not None:
2801 if runidx is not None:
2798 topic += ' (run #%d)' % runidx
2802 topic += ' (run #%d)' % runidx
2799 # Support both old and new progress API
2803 # Support both old and new progress API
2800 if util.safehasattr(ui, 'makeprogress'):
2804 if util.safehasattr(ui, 'makeprogress'):
2801 progress = ui.makeprogress(topic, unit='revs', total=total)
2805 progress = ui.makeprogress(topic, unit='revs', total=total)
2802
2806
2803 def updateprogress(pos):
2807 def updateprogress(pos):
2804 progress.update(pos)
2808 progress.update(pos)
2805
2809
2806 def completeprogress():
2810 def completeprogress():
2807 progress.complete()
2811 progress.complete()
2808
2812
2809 else:
2813 else:
2810
2814
2811 def updateprogress(pos):
2815 def updateprogress(pos):
2812 ui.progress(topic, pos, unit='revs', total=total)
2816 ui.progress(topic, pos, unit='revs', total=total)
2813
2817
2814 def completeprogress():
2818 def completeprogress():
2815 ui.progress(topic, None, unit='revs', total=total)
2819 ui.progress(topic, None, unit='revs', total=total)
2816
2820
2817 for idx, rev in enumerate(revs):
2821 for idx, rev in enumerate(revs):
2818 updateprogress(idx)
2822 updateprogress(idx)
2819 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2823 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2820 if clearcaches:
2824 if clearcaches:
2821 dest.index.clearcaches()
2825 dest.index.clearcaches()
2822 dest.clearcaches()
2826 dest.clearcaches()
2823 with timeone() as r:
2827 with timeone() as r:
2824 dest.addrawrevision(*addargs, **addkwargs)
2828 dest.addrawrevision(*addargs, **addkwargs)
2825 timings.append((rev, r[0]))
2829 timings.append((rev, r[0]))
2826 updateprogress(total)
2830 updateprogress(total)
2827 completeprogress()
2831 completeprogress()
2828 return timings
2832 return timings
2829
2833
2830
2834
2831 def _getrevisionseed(orig, rev, tr, source):
2835 def _getrevisionseed(orig, rev, tr, source):
2832 from mercurial.node import nullid
2836 from mercurial.node import nullid
2833
2837
2834 linkrev = orig.linkrev(rev)
2838 linkrev = orig.linkrev(rev)
2835 node = orig.node(rev)
2839 node = orig.node(rev)
2836 p1, p2 = orig.parents(node)
2840 p1, p2 = orig.parents(node)
2837 flags = orig.flags(rev)
2841 flags = orig.flags(rev)
2838 cachedelta = None
2842 cachedelta = None
2839 text = None
2843 text = None
2840
2844
2841 if source == b'full':
2845 if source == b'full':
2842 text = orig.revision(rev)
2846 text = orig.revision(rev)
2843 elif source == b'parent-1':
2847 elif source == b'parent-1':
2844 baserev = orig.rev(p1)
2848 baserev = orig.rev(p1)
2845 cachedelta = (baserev, orig.revdiff(p1, rev))
2849 cachedelta = (baserev, orig.revdiff(p1, rev))
2846 elif source == b'parent-2':
2850 elif source == b'parent-2':
2847 parent = p2
2851 parent = p2
2848 if p2 == nullid:
2852 if p2 == nullid:
2849 parent = p1
2853 parent = p1
2850 baserev = orig.rev(parent)
2854 baserev = orig.rev(parent)
2851 cachedelta = (baserev, orig.revdiff(parent, rev))
2855 cachedelta = (baserev, orig.revdiff(parent, rev))
2852 elif source == b'parent-smallest':
2856 elif source == b'parent-smallest':
2853 p1diff = orig.revdiff(p1, rev)
2857 p1diff = orig.revdiff(p1, rev)
2854 parent = p1
2858 parent = p1
2855 diff = p1diff
2859 diff = p1diff
2856 if p2 != nullid:
2860 if p2 != nullid:
2857 p2diff = orig.revdiff(p2, rev)
2861 p2diff = orig.revdiff(p2, rev)
2858 if len(p1diff) > len(p2diff):
2862 if len(p1diff) > len(p2diff):
2859 parent = p2
2863 parent = p2
2860 diff = p2diff
2864 diff = p2diff
2861 baserev = orig.rev(parent)
2865 baserev = orig.rev(parent)
2862 cachedelta = (baserev, diff)
2866 cachedelta = (baserev, diff)
2863 elif source == b'storage':
2867 elif source == b'storage':
2864 baserev = orig.deltaparent(rev)
2868 baserev = orig.deltaparent(rev)
2865 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2869 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2866
2870
2867 return (
2871 return (
2868 (text, tr, linkrev, p1, p2),
2872 (text, tr, linkrev, p1, p2),
2869 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2873 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2870 )
2874 )
2871
2875
2872
2876
2873 @contextlib.contextmanager
2877 @contextlib.contextmanager
2874 def _temprevlog(ui, orig, truncaterev):
2878 def _temprevlog(ui, orig, truncaterev):
2875 from mercurial import vfs as vfsmod
2879 from mercurial import vfs as vfsmod
2876
2880
2877 if orig._inline:
2881 if orig._inline:
2878 raise error.Abort('not supporting inline revlog (yet)')
2882 raise error.Abort('not supporting inline revlog (yet)')
2879 revlogkwargs = {}
2883 revlogkwargs = {}
2880 k = 'upperboundcomp'
2884 k = 'upperboundcomp'
2881 if util.safehasattr(orig, k):
2885 if util.safehasattr(orig, k):
2882 revlogkwargs[k] = getattr(orig, k)
2886 revlogkwargs[k] = getattr(orig, k)
2883
2887
2884 origindexpath = orig.opener.join(orig.indexfile)
2888 origindexpath = orig.opener.join(orig.indexfile)
2885 origdatapath = orig.opener.join(orig.datafile)
2889 origdatapath = orig.opener.join(orig.datafile)
2886 indexname = 'revlog.i'
2890 indexname = 'revlog.i'
2887 dataname = 'revlog.d'
2891 dataname = 'revlog.d'
2888
2892
2889 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2893 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2890 try:
2894 try:
2891 # copy the data file in a temporary directory
2895 # copy the data file in a temporary directory
2892 ui.debug('copying data in %s\n' % tmpdir)
2896 ui.debug('copying data in %s\n' % tmpdir)
2893 destindexpath = os.path.join(tmpdir, 'revlog.i')
2897 destindexpath = os.path.join(tmpdir, 'revlog.i')
2894 destdatapath = os.path.join(tmpdir, 'revlog.d')
2898 destdatapath = os.path.join(tmpdir, 'revlog.d')
2895 shutil.copyfile(origindexpath, destindexpath)
2899 shutil.copyfile(origindexpath, destindexpath)
2896 shutil.copyfile(origdatapath, destdatapath)
2900 shutil.copyfile(origdatapath, destdatapath)
2897
2901
2898 # remove the data we want to add again
2902 # remove the data we want to add again
2899 ui.debug('truncating data to be rewritten\n')
2903 ui.debug('truncating data to be rewritten\n')
2900 with open(destindexpath, 'ab') as index:
2904 with open(destindexpath, 'ab') as index:
2901 index.seek(0)
2905 index.seek(0)
2902 index.truncate(truncaterev * orig._io.size)
2906 index.truncate(truncaterev * orig._io.size)
2903 with open(destdatapath, 'ab') as data:
2907 with open(destdatapath, 'ab') as data:
2904 data.seek(0)
2908 data.seek(0)
2905 data.truncate(orig.start(truncaterev))
2909 data.truncate(orig.start(truncaterev))
2906
2910
2907 # instantiate a new revlog from the temporary copy
2911 # instantiate a new revlog from the temporary copy
2908 ui.debug('truncating adding to be rewritten\n')
2912 ui.debug('truncating adding to be rewritten\n')
2909 vfs = vfsmod.vfs(tmpdir)
2913 vfs = vfsmod.vfs(tmpdir)
2910 vfs.options = getattr(orig.opener, 'options', None)
2914 vfs.options = getattr(orig.opener, 'options', None)
2911
2915
2912 dest = revlog.revlog(
2916 dest = revlog.revlog(
2913 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2917 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2914 )
2918 )
2915 if dest._inline:
2919 if dest._inline:
2916 raise error.Abort('not supporting inline revlog (yet)')
2920 raise error.Abort('not supporting inline revlog (yet)')
2917 # make sure internals are initialized
2921 # make sure internals are initialized
2918 dest.revision(len(dest) - 1)
2922 dest.revision(len(dest) - 1)
2919 yield dest
2923 yield dest
2920 del dest, vfs
2924 del dest, vfs
2921 finally:
2925 finally:
2922 shutil.rmtree(tmpdir, True)
2926 shutil.rmtree(tmpdir, True)
2923
2927
2924
2928
2925 @command(
2929 @command(
2926 b'perfrevlogchunks',
2930 b'perfrevlogchunks',
2927 revlogopts
2931 revlogopts
2928 + formatteropts
2932 + formatteropts
2929 + [
2933 + [
2930 (b'e', b'engines', b'', b'compression engines to use'),
2934 (b'e', b'engines', b'', b'compression engines to use'),
2931 (b's', b'startrev', 0, b'revision to start at'),
2935 (b's', b'startrev', 0, b'revision to start at'),
2932 ],
2936 ],
2933 b'-c|-m|FILE',
2937 b'-c|-m|FILE',
2934 )
2938 )
2935 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2939 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2936 """Benchmark operations on revlog chunks.
2940 """Benchmark operations on revlog chunks.
2937
2941
2938 Logically, each revlog is a collection of fulltext revisions. However,
2942 Logically, each revlog is a collection of fulltext revisions. However,
2939 stored within each revlog are "chunks" of possibly compressed data. This
2943 stored within each revlog are "chunks" of possibly compressed data. This
2940 data needs to be read and decompressed or compressed and written.
2944 data needs to be read and decompressed or compressed and written.
2941
2945
2942 This command measures the time it takes to read+decompress and recompress
2946 This command measures the time it takes to read+decompress and recompress
2943 chunks in a revlog. It effectively isolates I/O and compression performance.
2947 chunks in a revlog. It effectively isolates I/O and compression performance.
2944 For measurements of higher-level operations like resolving revisions,
2948 For measurements of higher-level operations like resolving revisions,
2945 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2949 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2946 """
2950 """
2947 opts = _byteskwargs(opts)
2951 opts = _byteskwargs(opts)
2948
2952
2949 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2953 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2950
2954
2951 # _chunkraw was renamed to _getsegmentforrevs.
2955 # _chunkraw was renamed to _getsegmentforrevs.
2952 try:
2956 try:
2953 segmentforrevs = rl._getsegmentforrevs
2957 segmentforrevs = rl._getsegmentforrevs
2954 except AttributeError:
2958 except AttributeError:
2955 segmentforrevs = rl._chunkraw
2959 segmentforrevs = rl._chunkraw
2956
2960
2957 # Verify engines argument.
2961 # Verify engines argument.
2958 if engines:
2962 if engines:
2959 engines = set(e.strip() for e in engines.split(b','))
2963 engines = set(e.strip() for e in engines.split(b','))
2960 for engine in engines:
2964 for engine in engines:
2961 try:
2965 try:
2962 util.compressionengines[engine]
2966 util.compressionengines[engine]
2963 except KeyError:
2967 except KeyError:
2964 raise error.Abort(b'unknown compression engine: %s' % engine)
2968 raise error.Abort(b'unknown compression engine: %s' % engine)
2965 else:
2969 else:
2966 engines = []
2970 engines = []
2967 for e in util.compengines:
2971 for e in util.compengines:
2968 engine = util.compengines[e]
2972 engine = util.compengines[e]
2969 try:
2973 try:
2970 if engine.available():
2974 if engine.available():
2971 engine.revlogcompressor().compress(b'dummy')
2975 engine.revlogcompressor().compress(b'dummy')
2972 engines.append(e)
2976 engines.append(e)
2973 except NotImplementedError:
2977 except NotImplementedError:
2974 pass
2978 pass
2975
2979
2976 revs = list(rl.revs(startrev, len(rl) - 1))
2980 revs = list(rl.revs(startrev, len(rl) - 1))
2977
2981
2978 def rlfh(rl):
2982 def rlfh(rl):
2979 if rl._inline:
2983 if rl._inline:
2980 return getsvfs(repo)(rl.indexfile)
2984 return getsvfs(repo)(rl.indexfile)
2981 else:
2985 else:
2982 return getsvfs(repo)(rl.datafile)
2986 return getsvfs(repo)(rl.datafile)
2983
2987
2984 def doread():
2988 def doread():
2985 rl.clearcaches()
2989 rl.clearcaches()
2986 for rev in revs:
2990 for rev in revs:
2987 segmentforrevs(rev, rev)
2991 segmentforrevs(rev, rev)
2988
2992
2989 def doreadcachedfh():
2993 def doreadcachedfh():
2990 rl.clearcaches()
2994 rl.clearcaches()
2991 fh = rlfh(rl)
2995 fh = rlfh(rl)
2992 for rev in revs:
2996 for rev in revs:
2993 segmentforrevs(rev, rev, df=fh)
2997 segmentforrevs(rev, rev, df=fh)
2994
2998
2995 def doreadbatch():
2999 def doreadbatch():
2996 rl.clearcaches()
3000 rl.clearcaches()
2997 segmentforrevs(revs[0], revs[-1])
3001 segmentforrevs(revs[0], revs[-1])
2998
3002
2999 def doreadbatchcachedfh():
3003 def doreadbatchcachedfh():
3000 rl.clearcaches()
3004 rl.clearcaches()
3001 fh = rlfh(rl)
3005 fh = rlfh(rl)
3002 segmentforrevs(revs[0], revs[-1], df=fh)
3006 segmentforrevs(revs[0], revs[-1], df=fh)
3003
3007
3004 def dochunk():
3008 def dochunk():
3005 rl.clearcaches()
3009 rl.clearcaches()
3006 fh = rlfh(rl)
3010 fh = rlfh(rl)
3007 for rev in revs:
3011 for rev in revs:
3008 rl._chunk(rev, df=fh)
3012 rl._chunk(rev, df=fh)
3009
3013
3010 chunks = [None]
3014 chunks = [None]
3011
3015
3012 def dochunkbatch():
3016 def dochunkbatch():
3013 rl.clearcaches()
3017 rl.clearcaches()
3014 fh = rlfh(rl)
3018 fh = rlfh(rl)
3015 # Save chunks as a side-effect.
3019 # Save chunks as a side-effect.
3016 chunks[0] = rl._chunks(revs, df=fh)
3020 chunks[0] = rl._chunks(revs, df=fh)
3017
3021
3018 def docompress(compressor):
3022 def docompress(compressor):
3019 rl.clearcaches()
3023 rl.clearcaches()
3020
3024
3021 try:
3025 try:
3022 # Swap in the requested compression engine.
3026 # Swap in the requested compression engine.
3023 oldcompressor = rl._compressor
3027 oldcompressor = rl._compressor
3024 rl._compressor = compressor
3028 rl._compressor = compressor
3025 for chunk in chunks[0]:
3029 for chunk in chunks[0]:
3026 rl.compress(chunk)
3030 rl.compress(chunk)
3027 finally:
3031 finally:
3028 rl._compressor = oldcompressor
3032 rl._compressor = oldcompressor
3029
3033
3030 benches = [
3034 benches = [
3031 (lambda: doread(), b'read'),
3035 (lambda: doread(), b'read'),
3032 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3036 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3033 (lambda: doreadbatch(), b'read batch'),
3037 (lambda: doreadbatch(), b'read batch'),
3034 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3038 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3035 (lambda: dochunk(), b'chunk'),
3039 (lambda: dochunk(), b'chunk'),
3036 (lambda: dochunkbatch(), b'chunk batch'),
3040 (lambda: dochunkbatch(), b'chunk batch'),
3037 ]
3041 ]
3038
3042
3039 for engine in sorted(engines):
3043 for engine in sorted(engines):
3040 compressor = util.compengines[engine].revlogcompressor()
3044 compressor = util.compengines[engine].revlogcompressor()
3041 benches.append(
3045 benches.append(
3042 (
3046 (
3043 functools.partial(docompress, compressor),
3047 functools.partial(docompress, compressor),
3044 b'compress w/ %s' % engine,
3048 b'compress w/ %s' % engine,
3045 )
3049 )
3046 )
3050 )
3047
3051
3048 for fn, title in benches:
3052 for fn, title in benches:
3049 timer, fm = gettimer(ui, opts)
3053 timer, fm = gettimer(ui, opts)
3050 timer(fn, title=title)
3054 timer(fn, title=title)
3051 fm.end()
3055 fm.end()
3052
3056
3053
3057
3054 @command(
3058 @command(
3055 b'perfrevlogrevision',
3059 b'perfrevlogrevision',
3056 revlogopts
3060 revlogopts
3057 + formatteropts
3061 + formatteropts
3058 + [(b'', b'cache', False, b'use caches instead of clearing')],
3062 + [(b'', b'cache', False, b'use caches instead of clearing')],
3059 b'-c|-m|FILE REV',
3063 b'-c|-m|FILE REV',
3060 )
3064 )
3061 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3065 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3062 """Benchmark obtaining a revlog revision.
3066 """Benchmark obtaining a revlog revision.
3063
3067
3064 Obtaining a revlog revision consists of roughly the following steps:
3068 Obtaining a revlog revision consists of roughly the following steps:
3065
3069
3066 1. Compute the delta chain
3070 1. Compute the delta chain
3067 2. Slice the delta chain if applicable
3071 2. Slice the delta chain if applicable
3068 3. Obtain the raw chunks for that delta chain
3072 3. Obtain the raw chunks for that delta chain
3069 4. Decompress each raw chunk
3073 4. Decompress each raw chunk
3070 5. Apply binary patches to obtain fulltext
3074 5. Apply binary patches to obtain fulltext
3071 6. Verify hash of fulltext
3075 6. Verify hash of fulltext
3072
3076
3073 This command measures the time spent in each of these phases.
3077 This command measures the time spent in each of these phases.
3074 """
3078 """
3075 opts = _byteskwargs(opts)
3079 opts = _byteskwargs(opts)
3076
3080
3077 if opts.get(b'changelog') or opts.get(b'manifest'):
3081 if opts.get(b'changelog') or opts.get(b'manifest'):
3078 file_, rev = None, file_
3082 file_, rev = None, file_
3079 elif rev is None:
3083 elif rev is None:
3080 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3084 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3081
3085
3082 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3086 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3083
3087
3084 # _chunkraw was renamed to _getsegmentforrevs.
3088 # _chunkraw was renamed to _getsegmentforrevs.
3085 try:
3089 try:
3086 segmentforrevs = r._getsegmentforrevs
3090 segmentforrevs = r._getsegmentforrevs
3087 except AttributeError:
3091 except AttributeError:
3088 segmentforrevs = r._chunkraw
3092 segmentforrevs = r._chunkraw
3089
3093
3090 node = r.lookup(rev)
3094 node = r.lookup(rev)
3091 rev = r.rev(node)
3095 rev = r.rev(node)
3092
3096
3093 def getrawchunks(data, chain):
3097 def getrawchunks(data, chain):
3094 start = r.start
3098 start = r.start
3095 length = r.length
3099 length = r.length
3096 inline = r._inline
3100 inline = r._inline
3097 iosize = r._io.size
3101 iosize = r._io.size
3098 buffer = util.buffer
3102 buffer = util.buffer
3099
3103
3100 chunks = []
3104 chunks = []
3101 ladd = chunks.append
3105 ladd = chunks.append
3102 for idx, item in enumerate(chain):
3106 for idx, item in enumerate(chain):
3103 offset = start(item[0])
3107 offset = start(item[0])
3104 bits = data[idx]
3108 bits = data[idx]
3105 for rev in item:
3109 for rev in item:
3106 chunkstart = start(rev)
3110 chunkstart = start(rev)
3107 if inline:
3111 if inline:
3108 chunkstart += (rev + 1) * iosize
3112 chunkstart += (rev + 1) * iosize
3109 chunklength = length(rev)
3113 chunklength = length(rev)
3110 ladd(buffer(bits, chunkstart - offset, chunklength))
3114 ladd(buffer(bits, chunkstart - offset, chunklength))
3111
3115
3112 return chunks
3116 return chunks
3113
3117
3114 def dodeltachain(rev):
3118 def dodeltachain(rev):
3115 if not cache:
3119 if not cache:
3116 r.clearcaches()
3120 r.clearcaches()
3117 r._deltachain(rev)
3121 r._deltachain(rev)
3118
3122
3119 def doread(chain):
3123 def doread(chain):
3120 if not cache:
3124 if not cache:
3121 r.clearcaches()
3125 r.clearcaches()
3122 for item in slicedchain:
3126 for item in slicedchain:
3123 segmentforrevs(item[0], item[-1])
3127 segmentforrevs(item[0], item[-1])
3124
3128
3125 def doslice(r, chain, size):
3129 def doslice(r, chain, size):
3126 for s in slicechunk(r, chain, targetsize=size):
3130 for s in slicechunk(r, chain, targetsize=size):
3127 pass
3131 pass
3128
3132
3129 def dorawchunks(data, chain):
3133 def dorawchunks(data, chain):
3130 if not cache:
3134 if not cache:
3131 r.clearcaches()
3135 r.clearcaches()
3132 getrawchunks(data, chain)
3136 getrawchunks(data, chain)
3133
3137
3134 def dodecompress(chunks):
3138 def dodecompress(chunks):
3135 decomp = r.decompress
3139 decomp = r.decompress
3136 for chunk in chunks:
3140 for chunk in chunks:
3137 decomp(chunk)
3141 decomp(chunk)
3138
3142
3139 def dopatch(text, bins):
3143 def dopatch(text, bins):
3140 if not cache:
3144 if not cache:
3141 r.clearcaches()
3145 r.clearcaches()
3142 mdiff.patches(text, bins)
3146 mdiff.patches(text, bins)
3143
3147
3144 def dohash(text):
3148 def dohash(text):
3145 if not cache:
3149 if not cache:
3146 r.clearcaches()
3150 r.clearcaches()
3147 r.checkhash(text, node, rev=rev)
3151 r.checkhash(text, node, rev=rev)
3148
3152
3149 def dorevision():
3153 def dorevision():
3150 if not cache:
3154 if not cache:
3151 r.clearcaches()
3155 r.clearcaches()
3152 r.revision(node)
3156 r.revision(node)
3153
3157
3154 try:
3158 try:
3155 from mercurial.revlogutils.deltas import slicechunk
3159 from mercurial.revlogutils.deltas import slicechunk
3156 except ImportError:
3160 except ImportError:
3157 slicechunk = getattr(revlog, '_slicechunk', None)
3161 slicechunk = getattr(revlog, '_slicechunk', None)
3158
3162
3159 size = r.length(rev)
3163 size = r.length(rev)
3160 chain = r._deltachain(rev)[0]
3164 chain = r._deltachain(rev)[0]
3161 if not getattr(r, '_withsparseread', False):
3165 if not getattr(r, '_withsparseread', False):
3162 slicedchain = (chain,)
3166 slicedchain = (chain,)
3163 else:
3167 else:
3164 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3168 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3165 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3169 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3166 rawchunks = getrawchunks(data, slicedchain)
3170 rawchunks = getrawchunks(data, slicedchain)
3167 bins = r._chunks(chain)
3171 bins = r._chunks(chain)
3168 text = bytes(bins[0])
3172 text = bytes(bins[0])
3169 bins = bins[1:]
3173 bins = bins[1:]
3170 text = mdiff.patches(text, bins)
3174 text = mdiff.patches(text, bins)
3171
3175
3172 benches = [
3176 benches = [
3173 (lambda: dorevision(), b'full'),
3177 (lambda: dorevision(), b'full'),
3174 (lambda: dodeltachain(rev), b'deltachain'),
3178 (lambda: dodeltachain(rev), b'deltachain'),
3175 (lambda: doread(chain), b'read'),
3179 (lambda: doread(chain), b'read'),
3176 ]
3180 ]
3177
3181
3178 if getattr(r, '_withsparseread', False):
3182 if getattr(r, '_withsparseread', False):
3179 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3183 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3180 benches.append(slicing)
3184 benches.append(slicing)
3181
3185
3182 benches.extend(
3186 benches.extend(
3183 [
3187 [
3184 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3188 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3185 (lambda: dodecompress(rawchunks), b'decompress'),
3189 (lambda: dodecompress(rawchunks), b'decompress'),
3186 (lambda: dopatch(text, bins), b'patch'),
3190 (lambda: dopatch(text, bins), b'patch'),
3187 (lambda: dohash(text), b'hash'),
3191 (lambda: dohash(text), b'hash'),
3188 ]
3192 ]
3189 )
3193 )
3190
3194
3191 timer, fm = gettimer(ui, opts)
3195 timer, fm = gettimer(ui, opts)
3192 for fn, title in benches:
3196 for fn, title in benches:
3193 timer(fn, title=title)
3197 timer(fn, title=title)
3194 fm.end()
3198 fm.end()
3195
3199
3196
3200
3197 @command(
3201 @command(
3198 b'perfrevset',
3202 b'perfrevset',
3199 [
3203 [
3200 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3204 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3201 (b'', b'contexts', False, b'obtain changectx for each revision'),
3205 (b'', b'contexts', False, b'obtain changectx for each revision'),
3202 ]
3206 ]
3203 + formatteropts,
3207 + formatteropts,
3204 b"REVSET",
3208 b"REVSET",
3205 )
3209 )
3206 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3210 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3207 """benchmark the execution time of a revset
3211 """benchmark the execution time of a revset
3208
3212
3209 Use the --clean option if need to evaluate the impact of build volatile
3213 Use the --clean option if need to evaluate the impact of build volatile
3210 revisions set cache on the revset execution. Volatile cache hold filtered
3214 revisions set cache on the revset execution. Volatile cache hold filtered
3211 and obsolete related cache."""
3215 and obsolete related cache."""
3212 opts = _byteskwargs(opts)
3216 opts = _byteskwargs(opts)
3213
3217
3214 timer, fm = gettimer(ui, opts)
3218 timer, fm = gettimer(ui, opts)
3215
3219
3216 def d():
3220 def d():
3217 if clear:
3221 if clear:
3218 repo.invalidatevolatilesets()
3222 repo.invalidatevolatilesets()
3219 if contexts:
3223 if contexts:
3220 for ctx in repo.set(expr):
3224 for ctx in repo.set(expr):
3221 pass
3225 pass
3222 else:
3226 else:
3223 for r in repo.revs(expr):
3227 for r in repo.revs(expr):
3224 pass
3228 pass
3225
3229
3226 timer(d)
3230 timer(d)
3227 fm.end()
3231 fm.end()
3228
3232
3229
3233
3230 @command(
3234 @command(
3231 b'perfvolatilesets',
3235 b'perfvolatilesets',
3232 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3236 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3233 + formatteropts,
3237 + formatteropts,
3234 )
3238 )
3235 def perfvolatilesets(ui, repo, *names, **opts):
3239 def perfvolatilesets(ui, repo, *names, **opts):
3236 """benchmark the computation of various volatile set
3240 """benchmark the computation of various volatile set
3237
3241
3238 Volatile set computes element related to filtering and obsolescence."""
3242 Volatile set computes element related to filtering and obsolescence."""
3239 opts = _byteskwargs(opts)
3243 opts = _byteskwargs(opts)
3240 timer, fm = gettimer(ui, opts)
3244 timer, fm = gettimer(ui, opts)
3241 repo = repo.unfiltered()
3245 repo = repo.unfiltered()
3242
3246
3243 def getobs(name):
3247 def getobs(name):
3244 def d():
3248 def d():
3245 repo.invalidatevolatilesets()
3249 repo.invalidatevolatilesets()
3246 if opts[b'clear_obsstore']:
3250 if opts[b'clear_obsstore']:
3247 clearfilecache(repo, b'obsstore')
3251 clearfilecache(repo, b'obsstore')
3248 obsolete.getrevs(repo, name)
3252 obsolete.getrevs(repo, name)
3249
3253
3250 return d
3254 return d
3251
3255
3252 allobs = sorted(obsolete.cachefuncs)
3256 allobs = sorted(obsolete.cachefuncs)
3253 if names:
3257 if names:
3254 allobs = [n for n in allobs if n in names]
3258 allobs = [n for n in allobs if n in names]
3255
3259
3256 for name in allobs:
3260 for name in allobs:
3257 timer(getobs(name), title=name)
3261 timer(getobs(name), title=name)
3258
3262
3259 def getfiltered(name):
3263 def getfiltered(name):
3260 def d():
3264 def d():
3261 repo.invalidatevolatilesets()
3265 repo.invalidatevolatilesets()
3262 if opts[b'clear_obsstore']:
3266 if opts[b'clear_obsstore']:
3263 clearfilecache(repo, b'obsstore')
3267 clearfilecache(repo, b'obsstore')
3264 repoview.filterrevs(repo, name)
3268 repoview.filterrevs(repo, name)
3265
3269
3266 return d
3270 return d
3267
3271
3268 allfilter = sorted(repoview.filtertable)
3272 allfilter = sorted(repoview.filtertable)
3269 if names:
3273 if names:
3270 allfilter = [n for n in allfilter if n in names]
3274 allfilter = [n for n in allfilter if n in names]
3271
3275
3272 for name in allfilter:
3276 for name in allfilter:
3273 timer(getfiltered(name), title=name)
3277 timer(getfiltered(name), title=name)
3274 fm.end()
3278 fm.end()
3275
3279
3276
3280
3277 @command(
3281 @command(
3278 b'perfbranchmap',
3282 b'perfbranchmap',
3279 [
3283 [
3280 (b'f', b'full', False, b'Includes build time of subset'),
3284 (b'f', b'full', False, b'Includes build time of subset'),
3281 (
3285 (
3282 b'',
3286 b'',
3283 b'clear-revbranch',
3287 b'clear-revbranch',
3284 False,
3288 False,
3285 b'purge the revbranch cache between computation',
3289 b'purge the revbranch cache between computation',
3286 ),
3290 ),
3287 ]
3291 ]
3288 + formatteropts,
3292 + formatteropts,
3289 )
3293 )
3290 def perfbranchmap(ui, repo, *filternames, **opts):
3294 def perfbranchmap(ui, repo, *filternames, **opts):
3291 """benchmark the update of a branchmap
3295 """benchmark the update of a branchmap
3292
3296
3293 This benchmarks the full repo.branchmap() call with read and write disabled
3297 This benchmarks the full repo.branchmap() call with read and write disabled
3294 """
3298 """
3295 opts = _byteskwargs(opts)
3299 opts = _byteskwargs(opts)
3296 full = opts.get(b"full", False)
3300 full = opts.get(b"full", False)
3297 clear_revbranch = opts.get(b"clear_revbranch", False)
3301 clear_revbranch = opts.get(b"clear_revbranch", False)
3298 timer, fm = gettimer(ui, opts)
3302 timer, fm = gettimer(ui, opts)
3299
3303
3300 def getbranchmap(filtername):
3304 def getbranchmap(filtername):
3301 """generate a benchmark function for the filtername"""
3305 """generate a benchmark function for the filtername"""
3302 if filtername is None:
3306 if filtername is None:
3303 view = repo
3307 view = repo
3304 else:
3308 else:
3305 view = repo.filtered(filtername)
3309 view = repo.filtered(filtername)
3306 if util.safehasattr(view._branchcaches, '_per_filter'):
3310 if util.safehasattr(view._branchcaches, '_per_filter'):
3307 filtered = view._branchcaches._per_filter
3311 filtered = view._branchcaches._per_filter
3308 else:
3312 else:
3309 # older versions
3313 # older versions
3310 filtered = view._branchcaches
3314 filtered = view._branchcaches
3311
3315
3312 def d():
3316 def d():
3313 if clear_revbranch:
3317 if clear_revbranch:
3314 repo.revbranchcache()._clear()
3318 repo.revbranchcache()._clear()
3315 if full:
3319 if full:
3316 view._branchcaches.clear()
3320 view._branchcaches.clear()
3317 else:
3321 else:
3318 filtered.pop(filtername, None)
3322 filtered.pop(filtername, None)
3319 view.branchmap()
3323 view.branchmap()
3320
3324
3321 return d
3325 return d
3322
3326
3323 # add filter in smaller subset to bigger subset
3327 # add filter in smaller subset to bigger subset
3324 possiblefilters = set(repoview.filtertable)
3328 possiblefilters = set(repoview.filtertable)
3325 if filternames:
3329 if filternames:
3326 possiblefilters &= set(filternames)
3330 possiblefilters &= set(filternames)
3327 subsettable = getbranchmapsubsettable()
3331 subsettable = getbranchmapsubsettable()
3328 allfilters = []
3332 allfilters = []
3329 while possiblefilters:
3333 while possiblefilters:
3330 for name in possiblefilters:
3334 for name in possiblefilters:
3331 subset = subsettable.get(name)
3335 subset = subsettable.get(name)
3332 if subset not in possiblefilters:
3336 if subset not in possiblefilters:
3333 break
3337 break
3334 else:
3338 else:
3335 assert False, b'subset cycle %s!' % possiblefilters
3339 assert False, b'subset cycle %s!' % possiblefilters
3336 allfilters.append(name)
3340 allfilters.append(name)
3337 possiblefilters.remove(name)
3341 possiblefilters.remove(name)
3338
3342
3339 # warm the cache
3343 # warm the cache
3340 if not full:
3344 if not full:
3341 for name in allfilters:
3345 for name in allfilters:
3342 repo.filtered(name).branchmap()
3346 repo.filtered(name).branchmap()
3343 if not filternames or b'unfiltered' in filternames:
3347 if not filternames or b'unfiltered' in filternames:
3344 # add unfiltered
3348 # add unfiltered
3345 allfilters.append(None)
3349 allfilters.append(None)
3346
3350
3347 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3351 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3348 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3352 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3349 branchcacheread.set(classmethod(lambda *args: None))
3353 branchcacheread.set(classmethod(lambda *args: None))
3350 else:
3354 else:
3351 # older versions
3355 # older versions
3352 branchcacheread = safeattrsetter(branchmap, b'read')
3356 branchcacheread = safeattrsetter(branchmap, b'read')
3353 branchcacheread.set(lambda *args: None)
3357 branchcacheread.set(lambda *args: None)
3354 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3358 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3355 branchcachewrite.set(lambda *args: None)
3359 branchcachewrite.set(lambda *args: None)
3356 try:
3360 try:
3357 for name in allfilters:
3361 for name in allfilters:
3358 printname = name
3362 printname = name
3359 if name is None:
3363 if name is None:
3360 printname = b'unfiltered'
3364 printname = b'unfiltered'
3361 timer(getbranchmap(name), title=str(printname))
3365 timer(getbranchmap(name), title=str(printname))
3362 finally:
3366 finally:
3363 branchcacheread.restore()
3367 branchcacheread.restore()
3364 branchcachewrite.restore()
3368 branchcachewrite.restore()
3365 fm.end()
3369 fm.end()
3366
3370
3367
3371
3368 @command(
3372 @command(
3369 b'perfbranchmapupdate',
3373 b'perfbranchmapupdate',
3370 [
3374 [
3371 (b'', b'base', [], b'subset of revision to start from'),
3375 (b'', b'base', [], b'subset of revision to start from'),
3372 (b'', b'target', [], b'subset of revision to end with'),
3376 (b'', b'target', [], b'subset of revision to end with'),
3373 (b'', b'clear-caches', False, b'clear cache between each runs'),
3377 (b'', b'clear-caches', False, b'clear cache between each runs'),
3374 ]
3378 ]
3375 + formatteropts,
3379 + formatteropts,
3376 )
3380 )
3377 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3381 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3378 """benchmark branchmap update from for <base> revs to <target> revs
3382 """benchmark branchmap update from for <base> revs to <target> revs
3379
3383
3380 If `--clear-caches` is passed, the following items will be reset before
3384 If `--clear-caches` is passed, the following items will be reset before
3381 each update:
3385 each update:
3382 * the changelog instance and associated indexes
3386 * the changelog instance and associated indexes
3383 * the rev-branch-cache instance
3387 * the rev-branch-cache instance
3384
3388
3385 Examples:
3389 Examples:
3386
3390
3387 # update for the one last revision
3391 # update for the one last revision
3388 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3392 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3389
3393
3390 $ update for change coming with a new branch
3394 $ update for change coming with a new branch
3391 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3395 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3392 """
3396 """
3393 from mercurial import branchmap
3397 from mercurial import branchmap
3394 from mercurial import repoview
3398 from mercurial import repoview
3395
3399
3396 opts = _byteskwargs(opts)
3400 opts = _byteskwargs(opts)
3397 timer, fm = gettimer(ui, opts)
3401 timer, fm = gettimer(ui, opts)
3398 clearcaches = opts[b'clear_caches']
3402 clearcaches = opts[b'clear_caches']
3399 unfi = repo.unfiltered()
3403 unfi = repo.unfiltered()
3400 x = [None] # used to pass data between closure
3404 x = [None] # used to pass data between closure
3401
3405
3402 # we use a `list` here to avoid possible side effect from smartset
3406 # we use a `list` here to avoid possible side effect from smartset
3403 baserevs = list(scmutil.revrange(repo, base))
3407 baserevs = list(scmutil.revrange(repo, base))
3404 targetrevs = list(scmutil.revrange(repo, target))
3408 targetrevs = list(scmutil.revrange(repo, target))
3405 if not baserevs:
3409 if not baserevs:
3406 raise error.Abort(b'no revisions selected for --base')
3410 raise error.Abort(b'no revisions selected for --base')
3407 if not targetrevs:
3411 if not targetrevs:
3408 raise error.Abort(b'no revisions selected for --target')
3412 raise error.Abort(b'no revisions selected for --target')
3409
3413
3410 # make sure the target branchmap also contains the one in the base
3414 # make sure the target branchmap also contains the one in the base
3411 targetrevs = list(set(baserevs) | set(targetrevs))
3415 targetrevs = list(set(baserevs) | set(targetrevs))
3412 targetrevs.sort()
3416 targetrevs.sort()
3413
3417
3414 cl = repo.changelog
3418 cl = repo.changelog
3415 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3419 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3416 allbaserevs.sort()
3420 allbaserevs.sort()
3417 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3421 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3418
3422
3419 newrevs = list(alltargetrevs.difference(allbaserevs))
3423 newrevs = list(alltargetrevs.difference(allbaserevs))
3420 newrevs.sort()
3424 newrevs.sort()
3421
3425
3422 allrevs = frozenset(unfi.changelog.revs())
3426 allrevs = frozenset(unfi.changelog.revs())
3423 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3427 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3424 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3428 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3425
3429
3426 def basefilter(repo, visibilityexceptions=None):
3430 def basefilter(repo, visibilityexceptions=None):
3427 return basefilterrevs
3431 return basefilterrevs
3428
3432
3429 def targetfilter(repo, visibilityexceptions=None):
3433 def targetfilter(repo, visibilityexceptions=None):
3430 return targetfilterrevs
3434 return targetfilterrevs
3431
3435
3432 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3436 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3433 ui.status(msg % (len(allbaserevs), len(newrevs)))
3437 ui.status(msg % (len(allbaserevs), len(newrevs)))
3434 if targetfilterrevs:
3438 if targetfilterrevs:
3435 msg = b'(%d revisions still filtered)\n'
3439 msg = b'(%d revisions still filtered)\n'
3436 ui.status(msg % len(targetfilterrevs))
3440 ui.status(msg % len(targetfilterrevs))
3437
3441
3438 try:
3442 try:
3439 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3443 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3440 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3444 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3441
3445
3442 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3446 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3443 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3447 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3444
3448
3445 # try to find an existing branchmap to reuse
3449 # try to find an existing branchmap to reuse
3446 subsettable = getbranchmapsubsettable()
3450 subsettable = getbranchmapsubsettable()
3447 candidatefilter = subsettable.get(None)
3451 candidatefilter = subsettable.get(None)
3448 while candidatefilter is not None:
3452 while candidatefilter is not None:
3449 candidatebm = repo.filtered(candidatefilter).branchmap()
3453 candidatebm = repo.filtered(candidatefilter).branchmap()
3450 if candidatebm.validfor(baserepo):
3454 if candidatebm.validfor(baserepo):
3451 filtered = repoview.filterrevs(repo, candidatefilter)
3455 filtered = repoview.filterrevs(repo, candidatefilter)
3452 missing = [r for r in allbaserevs if r in filtered]
3456 missing = [r for r in allbaserevs if r in filtered]
3453 base = candidatebm.copy()
3457 base = candidatebm.copy()
3454 base.update(baserepo, missing)
3458 base.update(baserepo, missing)
3455 break
3459 break
3456 candidatefilter = subsettable.get(candidatefilter)
3460 candidatefilter = subsettable.get(candidatefilter)
3457 else:
3461 else:
3458 # no suitable subset where found
3462 # no suitable subset where found
3459 base = branchmap.branchcache()
3463 base = branchmap.branchcache()
3460 base.update(baserepo, allbaserevs)
3464 base.update(baserepo, allbaserevs)
3461
3465
3462 def setup():
3466 def setup():
3463 x[0] = base.copy()
3467 x[0] = base.copy()
3464 if clearcaches:
3468 if clearcaches:
3465 unfi._revbranchcache = None
3469 unfi._revbranchcache = None
3466 clearchangelog(repo)
3470 clearchangelog(repo)
3467
3471
3468 def bench():
3472 def bench():
3469 x[0].update(targetrepo, newrevs)
3473 x[0].update(targetrepo, newrevs)
3470
3474
3471 timer(bench, setup=setup)
3475 timer(bench, setup=setup)
3472 fm.end()
3476 fm.end()
3473 finally:
3477 finally:
3474 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3478 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3475 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3479 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3476
3480
3477
3481
3478 @command(
3482 @command(
3479 b'perfbranchmapload',
3483 b'perfbranchmapload',
3480 [
3484 [
3481 (b'f', b'filter', b'', b'Specify repoview filter'),
3485 (b'f', b'filter', b'', b'Specify repoview filter'),
3482 (b'', b'list', False, b'List brachmap filter caches'),
3486 (b'', b'list', False, b'List brachmap filter caches'),
3483 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3487 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3484 ]
3488 ]
3485 + formatteropts,
3489 + formatteropts,
3486 )
3490 )
3487 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3491 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3488 """benchmark reading the branchmap"""
3492 """benchmark reading the branchmap"""
3489 opts = _byteskwargs(opts)
3493 opts = _byteskwargs(opts)
3490 clearrevlogs = opts[b'clear_revlogs']
3494 clearrevlogs = opts[b'clear_revlogs']
3491
3495
3492 if list:
3496 if list:
3493 for name, kind, st in repo.cachevfs.readdir(stat=True):
3497 for name, kind, st in repo.cachevfs.readdir(stat=True):
3494 if name.startswith(b'branch2'):
3498 if name.startswith(b'branch2'):
3495 filtername = name.partition(b'-')[2] or b'unfiltered'
3499 filtername = name.partition(b'-')[2] or b'unfiltered'
3496 ui.status(
3500 ui.status(
3497 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3501 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3498 )
3502 )
3499 return
3503 return
3500 if not filter:
3504 if not filter:
3501 filter = None
3505 filter = None
3502 subsettable = getbranchmapsubsettable()
3506 subsettable = getbranchmapsubsettable()
3503 if filter is None:
3507 if filter is None:
3504 repo = repo.unfiltered()
3508 repo = repo.unfiltered()
3505 else:
3509 else:
3506 repo = repoview.repoview(repo, filter)
3510 repo = repoview.repoview(repo, filter)
3507
3511
3508 repo.branchmap() # make sure we have a relevant, up to date branchmap
3512 repo.branchmap() # make sure we have a relevant, up to date branchmap
3509
3513
3510 try:
3514 try:
3511 fromfile = branchmap.branchcache.fromfile
3515 fromfile = branchmap.branchcache.fromfile
3512 except AttributeError:
3516 except AttributeError:
3513 # older versions
3517 # older versions
3514 fromfile = branchmap.read
3518 fromfile = branchmap.read
3515
3519
3516 currentfilter = filter
3520 currentfilter = filter
3517 # try once without timer, the filter may not be cached
3521 # try once without timer, the filter may not be cached
3518 while fromfile(repo) is None:
3522 while fromfile(repo) is None:
3519 currentfilter = subsettable.get(currentfilter)
3523 currentfilter = subsettable.get(currentfilter)
3520 if currentfilter is None:
3524 if currentfilter is None:
3521 raise error.Abort(
3525 raise error.Abort(
3522 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3526 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3523 )
3527 )
3524 repo = repo.filtered(currentfilter)
3528 repo = repo.filtered(currentfilter)
3525 timer, fm = gettimer(ui, opts)
3529 timer, fm = gettimer(ui, opts)
3526
3530
3527 def setup():
3531 def setup():
3528 if clearrevlogs:
3532 if clearrevlogs:
3529 clearchangelog(repo)
3533 clearchangelog(repo)
3530
3534
3531 def bench():
3535 def bench():
3532 fromfile(repo)
3536 fromfile(repo)
3533
3537
3534 timer(bench, setup=setup)
3538 timer(bench, setup=setup)
3535 fm.end()
3539 fm.end()
3536
3540
3537
3541
3538 @command(b'perfloadmarkers')
3542 @command(b'perfloadmarkers')
3539 def perfloadmarkers(ui, repo):
3543 def perfloadmarkers(ui, repo):
3540 """benchmark the time to parse the on-disk markers for a repo
3544 """benchmark the time to parse the on-disk markers for a repo
3541
3545
3542 Result is the number of markers in the repo."""
3546 Result is the number of markers in the repo."""
3543 timer, fm = gettimer(ui)
3547 timer, fm = gettimer(ui)
3544 svfs = getsvfs(repo)
3548 svfs = getsvfs(repo)
3545 timer(lambda: len(obsolete.obsstore(svfs)))
3549 timer(lambda: len(obsolete.obsstore(svfs)))
3546 fm.end()
3550 fm.end()
3547
3551
3548
3552
3549 @command(
3553 @command(
3550 b'perflrucachedict',
3554 b'perflrucachedict',
3551 formatteropts
3555 formatteropts
3552 + [
3556 + [
3553 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3557 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3554 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3558 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3555 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3559 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3556 (b'', b'size', 4, b'size of cache'),
3560 (b'', b'size', 4, b'size of cache'),
3557 (b'', b'gets', 10000, b'number of key lookups'),
3561 (b'', b'gets', 10000, b'number of key lookups'),
3558 (b'', b'sets', 10000, b'number of key sets'),
3562 (b'', b'sets', 10000, b'number of key sets'),
3559 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3563 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3560 (
3564 (
3561 b'',
3565 b'',
3562 b'mixedgetfreq',
3566 b'mixedgetfreq',
3563 50,
3567 50,
3564 b'frequency of get vs set ops in mixed mode',
3568 b'frequency of get vs set ops in mixed mode',
3565 ),
3569 ),
3566 ],
3570 ],
3567 norepo=True,
3571 norepo=True,
3568 )
3572 )
3569 def perflrucache(
3573 def perflrucache(
3570 ui,
3574 ui,
3571 mincost=0,
3575 mincost=0,
3572 maxcost=100,
3576 maxcost=100,
3573 costlimit=0,
3577 costlimit=0,
3574 size=4,
3578 size=4,
3575 gets=10000,
3579 gets=10000,
3576 sets=10000,
3580 sets=10000,
3577 mixed=10000,
3581 mixed=10000,
3578 mixedgetfreq=50,
3582 mixedgetfreq=50,
3579 **opts
3583 **opts
3580 ):
3584 ):
3581 opts = _byteskwargs(opts)
3585 opts = _byteskwargs(opts)
3582
3586
3583 def doinit():
3587 def doinit():
3584 for i in _xrange(10000):
3588 for i in _xrange(10000):
3585 util.lrucachedict(size)
3589 util.lrucachedict(size)
3586
3590
3587 costrange = list(range(mincost, maxcost + 1))
3591 costrange = list(range(mincost, maxcost + 1))
3588
3592
3589 values = []
3593 values = []
3590 for i in _xrange(size):
3594 for i in _xrange(size):
3591 values.append(random.randint(0, _maxint))
3595 values.append(random.randint(0, _maxint))
3592
3596
3593 # Get mode fills the cache and tests raw lookup performance with no
3597 # Get mode fills the cache and tests raw lookup performance with no
3594 # eviction.
3598 # eviction.
3595 getseq = []
3599 getseq = []
3596 for i in _xrange(gets):
3600 for i in _xrange(gets):
3597 getseq.append(random.choice(values))
3601 getseq.append(random.choice(values))
3598
3602
3599 def dogets():
3603 def dogets():
3600 d = util.lrucachedict(size)
3604 d = util.lrucachedict(size)
3601 for v in values:
3605 for v in values:
3602 d[v] = v
3606 d[v] = v
3603 for key in getseq:
3607 for key in getseq:
3604 value = d[key]
3608 value = d[key]
3605 value # silence pyflakes warning
3609 value # silence pyflakes warning
3606
3610
3607 def dogetscost():
3611 def dogetscost():
3608 d = util.lrucachedict(size, maxcost=costlimit)
3612 d = util.lrucachedict(size, maxcost=costlimit)
3609 for i, v in enumerate(values):
3613 for i, v in enumerate(values):
3610 d.insert(v, v, cost=costs[i])
3614 d.insert(v, v, cost=costs[i])
3611 for key in getseq:
3615 for key in getseq:
3612 try:
3616 try:
3613 value = d[key]
3617 value = d[key]
3614 value # silence pyflakes warning
3618 value # silence pyflakes warning
3615 except KeyError:
3619 except KeyError:
3616 pass
3620 pass
3617
3621
3618 # Set mode tests insertion speed with cache eviction.
3622 # Set mode tests insertion speed with cache eviction.
3619 setseq = []
3623 setseq = []
3620 costs = []
3624 costs = []
3621 for i in _xrange(sets):
3625 for i in _xrange(sets):
3622 setseq.append(random.randint(0, _maxint))
3626 setseq.append(random.randint(0, _maxint))
3623 costs.append(random.choice(costrange))
3627 costs.append(random.choice(costrange))
3624
3628
3625 def doinserts():
3629 def doinserts():
3626 d = util.lrucachedict(size)
3630 d = util.lrucachedict(size)
3627 for v in setseq:
3631 for v in setseq:
3628 d.insert(v, v)
3632 d.insert(v, v)
3629
3633
3630 def doinsertscost():
3634 def doinsertscost():
3631 d = util.lrucachedict(size, maxcost=costlimit)
3635 d = util.lrucachedict(size, maxcost=costlimit)
3632 for i, v in enumerate(setseq):
3636 for i, v in enumerate(setseq):
3633 d.insert(v, v, cost=costs[i])
3637 d.insert(v, v, cost=costs[i])
3634
3638
3635 def dosets():
3639 def dosets():
3636 d = util.lrucachedict(size)
3640 d = util.lrucachedict(size)
3637 for v in setseq:
3641 for v in setseq:
3638 d[v] = v
3642 d[v] = v
3639
3643
3640 # Mixed mode randomly performs gets and sets with eviction.
3644 # Mixed mode randomly performs gets and sets with eviction.
3641 mixedops = []
3645 mixedops = []
3642 for i in _xrange(mixed):
3646 for i in _xrange(mixed):
3643 r = random.randint(0, 100)
3647 r = random.randint(0, 100)
3644 if r < mixedgetfreq:
3648 if r < mixedgetfreq:
3645 op = 0
3649 op = 0
3646 else:
3650 else:
3647 op = 1
3651 op = 1
3648
3652
3649 mixedops.append(
3653 mixedops.append(
3650 (op, random.randint(0, size * 2), random.choice(costrange))
3654 (op, random.randint(0, size * 2), random.choice(costrange))
3651 )
3655 )
3652
3656
3653 def domixed():
3657 def domixed():
3654 d = util.lrucachedict(size)
3658 d = util.lrucachedict(size)
3655
3659
3656 for op, v, cost in mixedops:
3660 for op, v, cost in mixedops:
3657 if op == 0:
3661 if op == 0:
3658 try:
3662 try:
3659 d[v]
3663 d[v]
3660 except KeyError:
3664 except KeyError:
3661 pass
3665 pass
3662 else:
3666 else:
3663 d[v] = v
3667 d[v] = v
3664
3668
3665 def domixedcost():
3669 def domixedcost():
3666 d = util.lrucachedict(size, maxcost=costlimit)
3670 d = util.lrucachedict(size, maxcost=costlimit)
3667
3671
3668 for op, v, cost in mixedops:
3672 for op, v, cost in mixedops:
3669 if op == 0:
3673 if op == 0:
3670 try:
3674 try:
3671 d[v]
3675 d[v]
3672 except KeyError:
3676 except KeyError:
3673 pass
3677 pass
3674 else:
3678 else:
3675 d.insert(v, v, cost=cost)
3679 d.insert(v, v, cost=cost)
3676
3680
3677 benches = [
3681 benches = [
3678 (doinit, b'init'),
3682 (doinit, b'init'),
3679 ]
3683 ]
3680
3684
3681 if costlimit:
3685 if costlimit:
3682 benches.extend(
3686 benches.extend(
3683 [
3687 [
3684 (dogetscost, b'gets w/ cost limit'),
3688 (dogetscost, b'gets w/ cost limit'),
3685 (doinsertscost, b'inserts w/ cost limit'),
3689 (doinsertscost, b'inserts w/ cost limit'),
3686 (domixedcost, b'mixed w/ cost limit'),
3690 (domixedcost, b'mixed w/ cost limit'),
3687 ]
3691 ]
3688 )
3692 )
3689 else:
3693 else:
3690 benches.extend(
3694 benches.extend(
3691 [
3695 [
3692 (dogets, b'gets'),
3696 (dogets, b'gets'),
3693 (doinserts, b'inserts'),
3697 (doinserts, b'inserts'),
3694 (dosets, b'sets'),
3698 (dosets, b'sets'),
3695 (domixed, b'mixed'),
3699 (domixed, b'mixed'),
3696 ]
3700 ]
3697 )
3701 )
3698
3702
3699 for fn, title in benches:
3703 for fn, title in benches:
3700 timer, fm = gettimer(ui, opts)
3704 timer, fm = gettimer(ui, opts)
3701 timer(fn, title=title)
3705 timer(fn, title=title)
3702 fm.end()
3706 fm.end()
3703
3707
3704
3708
3705 @command(b'perfwrite', formatteropts)
3709 @command(b'perfwrite', formatteropts)
3706 def perfwrite(ui, repo, **opts):
3710 def perfwrite(ui, repo, **opts):
3707 """microbenchmark ui.write
3711 """microbenchmark ui.write
3708 """
3712 """
3709 opts = _byteskwargs(opts)
3713 opts = _byteskwargs(opts)
3710
3714
3711 timer, fm = gettimer(ui, opts)
3715 timer, fm = gettimer(ui, opts)
3712
3716
3713 def write():
3717 def write():
3714 for i in range(100000):
3718 for i in range(100000):
3715 ui.writenoi18n(b'Testing write performance\n')
3719 ui.writenoi18n(b'Testing write performance\n')
3716
3720
3717 timer(write)
3721 timer(write)
3718 fm.end()
3722 fm.end()
3719
3723
3720
3724
3721 def uisetup(ui):
3725 def uisetup(ui):
3722 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3726 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3723 commands, b'debugrevlogopts'
3727 commands, b'debugrevlogopts'
3724 ):
3728 ):
3725 # for "historical portability":
3729 # for "historical portability":
3726 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3730 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3727 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3731 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3728 # openrevlog() should cause failure, because it has been
3732 # openrevlog() should cause failure, because it has been
3729 # available since 3.5 (or 49c583ca48c4).
3733 # available since 3.5 (or 49c583ca48c4).
3730 def openrevlog(orig, repo, cmd, file_, opts):
3734 def openrevlog(orig, repo, cmd, file_, opts):
3731 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3735 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3732 raise error.Abort(
3736 raise error.Abort(
3733 b"This version doesn't support --dir option",
3737 b"This version doesn't support --dir option",
3734 hint=b"use 3.5 or later",
3738 hint=b"use 3.5 or later",
3735 )
3739 )
3736 return orig(repo, cmd, file_, opts)
3740 return orig(repo, cmd, file_, opts)
3737
3741
3738 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3742 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3739
3743
3740
3744
3741 @command(
3745 @command(
3742 b'perfprogress',
3746 b'perfprogress',
3743 formatteropts
3747 formatteropts
3744 + [
3748 + [
3745 (b'', b'topic', b'topic', b'topic for progress messages'),
3749 (b'', b'topic', b'topic', b'topic for progress messages'),
3746 (b'c', b'total', 1000000, b'total value we are progressing to'),
3750 (b'c', b'total', 1000000, b'total value we are progressing to'),
3747 ],
3751 ],
3748 norepo=True,
3752 norepo=True,
3749 )
3753 )
3750 def perfprogress(ui, topic=None, total=None, **opts):
3754 def perfprogress(ui, topic=None, total=None, **opts):
3751 """printing of progress bars"""
3755 """printing of progress bars"""
3752 opts = _byteskwargs(opts)
3756 opts = _byteskwargs(opts)
3753
3757
3754 timer, fm = gettimer(ui, opts)
3758 timer, fm = gettimer(ui, opts)
3755
3759
3756 def doprogress():
3760 def doprogress():
3757 with ui.makeprogress(topic, total=total) as progress:
3761 with ui.makeprogress(topic, total=total) as progress:
3758 for i in _xrange(total):
3762 for i in _xrange(total):
3759 progress.increment()
3763 progress.increment()
3760
3764
3761 timer(doprogress)
3765 timer(doprogress)
3762 fm.end()
3766 fm.end()
@@ -1,396 +1,396
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from for <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 (no help text available)
108 (no help text available)
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
111 perfdirstatedirs
111 perfdirstatedirs
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 (no help text available)
114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 perfdirstatewrite
115 perfdirstatewrite
116 (no help text available)
116 (no help text available)
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
175 perfstatus benchmark the performance of a single status call
175 perfstatus benchmark the performance of a single status call
176 perftags (no help text available)
176 perftags (no help text available)
177 perftemplating
177 perftemplating
178 test the rendering time of a given template
178 test the rendering time of a given template
179 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
180 perfvolatilesets
180 perfvolatilesets
181 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
182 perfwalk (no help text available)
182 perfwalk (no help text available)
183 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
184
184
185 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
186 $ hg perfaddremove
186 $ hg perfaddremove
187 $ hg perfancestors
187 $ hg perfancestors
188 $ hg perfancestorset 2
188 $ hg perfancestorset 2
189 $ hg perfannotate a
189 $ hg perfannotate a
190 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
191 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
192 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
193 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
194 $ hg perfbookmarks
194 $ hg perfbookmarks
195 $ hg perfbranchmap
195 $ hg perfbranchmap
196 $ hg perfbranchmapload
196 $ hg perfbranchmapload
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
199 $ hg perfcca
199 $ hg perfcca
200 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
201 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
202 $ hg perfchangeset 2
202 $ hg perfchangeset 2
203 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
204 $ hg perfdiffwd
204 $ hg perfdiffwd
205 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
206 $ hg perfdirs
206 $ hg perfdirs
207 $ hg perfdirstate
207 $ hg perfdirstate
208 $ hg perfdirstatedirs
208 $ hg perfdirstatedirs
209 $ hg perfdirstatefoldmap
209 $ hg perfdirstatefoldmap
210 $ hg perfdirstatewrite
210 $ hg perfdirstatewrite
211 #if repofncache
211 #if repofncache
212 $ hg perffncacheencode
212 $ hg perffncacheencode
213 $ hg perffncacheload
213 $ hg perffncacheload
214 $ hg debugrebuildfncache
214 $ hg debugrebuildfncache
215 fncache already up to date
215 fncache already up to date
216 $ hg perffncachewrite
216 $ hg perffncachewrite
217 $ hg debugrebuildfncache
217 $ hg debugrebuildfncache
218 fncache already up to date
218 fncache already up to date
219 #endif
219 #endif
220 $ hg perfheads
220 $ hg perfheads
221 $ hg perfignore
221 $ hg perfignore
222 $ hg perfindex
222 $ hg perfindex
223 $ hg perflinelogedits -n 1
223 $ hg perflinelogedits -n 1
224 $ hg perfloadmarkers
224 $ hg perfloadmarkers
225 $ hg perflog
225 $ hg perflog
226 $ hg perflookup 2
226 $ hg perflookup 2
227 $ hg perflrucache
227 $ hg perflrucache
228 $ hg perfmanifest 2
228 $ hg perfmanifest 2
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 $ hg perfmanifest -m 44fe2c8352bb
230 $ hg perfmanifest -m 44fe2c8352bb
231 abort: manifest revision must be integer or full node
231 abort: manifest revision must be integer or full node
232 [255]
232 [255]
233 $ hg perfmergecalculate -r 3
233 $ hg perfmergecalculate -r 3
234 $ hg perfmoonwalk
234 $ hg perfmoonwalk
235 $ hg perfnodelookup 2
235 $ hg perfnodelookup 2
236 $ hg perfpathcopies 1 2
236 $ hg perfpathcopies 1 2
237 $ hg perfprogress --total 1000
237 $ hg perfprogress --total 1000
238 $ hg perfrawfiles 2
238 $ hg perfrawfiles 2
239 $ hg perfrevlogindex -c
239 $ hg perfrevlogindex -c
240 #if reporevlogstore
240 #if reporevlogstore
241 $ hg perfrevlogrevisions .hg/store/data/a.i
241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 #endif
242 #endif
243 $ hg perfrevlogrevision -m 0
243 $ hg perfrevlogrevision -m 0
244 $ hg perfrevlogchunks -c
244 $ hg perfrevlogchunks -c
245 $ hg perfrevrange
245 $ hg perfrevrange
246 $ hg perfrevset 'all()'
246 $ hg perfrevset 'all()'
247 $ hg perfstartup
247 $ hg perfstartup
248 $ hg perfstatus
248 $ hg perfstatus
249 $ hg perftags
249 $ hg perftags
250 $ hg perftemplating
250 $ hg perftemplating
251 $ hg perfvolatilesets
251 $ hg perfvolatilesets
252 $ hg perfwalk
252 $ hg perfwalk
253 $ hg perfparents
253 $ hg perfparents
254 $ hg perfdiscovery -q .
254 $ hg perfdiscovery -q .
255
255
256 Test run control
256 Test run control
257 ----------------
257 ----------------
258
258
259 Simple single entry
259 Simple single entry
260
260
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 ! wall * comb * user * sys * (best of 15) (glob)
262 ! wall * comb * user * sys * (best of 15) (glob)
263
263
264 Multiple entries
264 Multiple entries
265
265
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 ! wall * comb * user * sys * (best of 5) (glob)
267 ! wall * comb * user * sys * (best of 5) (glob)
268
268
269 error case are ignored
269 error case are ignored
270
270
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 malformatted run limit entry, missing "-": 500
272 malformatted run limit entry, missing "-": 500
273 ! wall * comb * user * sys * (best of 5) (glob)
273 ! wall * comb * user * sys * (best of 5) (glob)
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 ! wall * comb * user * sys * (best of 5) (glob)
277 ! wall * comb * user * sys * (best of 5) (glob)
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 ! wall * comb * user * sys * (best of 5) (glob)
280 ! wall * comb * user * sys * (best of 5) (glob)
281
281
282 test actual output
282 test actual output
283 ------------------
283 ------------------
284
284
285 normal output:
285 normal output:
286
286
287 $ hg perfheads --config perf.stub=no
287 $ hg perfheads --config perf.stub=no
288 ! wall * comb * user * sys * (best of *) (glob)
288 ! wall * comb * user * sys * (best of *) (glob)
289
289
290 detailed output:
290 detailed output:
291
291
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 ! wall * comb * user * sys * (best of *) (glob)
293 ! wall * comb * user * sys * (best of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
297
297
298 test json output
298 test json output
299 ----------------
299 ----------------
300
300
301 normal output:
301 normal output:
302
302
303 $ hg perfheads --template json --config perf.stub=no
303 $ hg perfheads --template json --config perf.stub=no
304 [
304 [
305 {
305 {
306 "comb": *, (glob)
306 "comb": *, (glob)
307 "count": *, (glob)
307 "count": *, (glob)
308 "sys": *, (glob)
308 "sys": *, (glob)
309 "user": *, (glob)
309 "user": *, (glob)
310 "wall": * (glob)
310 "wall": * (glob)
311 }
311 }
312 ]
312 ]
313
313
314 detailed output:
314 detailed output:
315
315
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 [
317 [
318 {
318 {
319 "avg.comb": *, (glob)
319 "avg.comb": *, (glob)
320 "avg.count": *, (glob)
320 "avg.count": *, (glob)
321 "avg.sys": *, (glob)
321 "avg.sys": *, (glob)
322 "avg.user": *, (glob)
322 "avg.user": *, (glob)
323 "avg.wall": *, (glob)
323 "avg.wall": *, (glob)
324 "comb": *, (glob)
324 "comb": *, (glob)
325 "count": *, (glob)
325 "count": *, (glob)
326 "max.comb": *, (glob)
326 "max.comb": *, (glob)
327 "max.count": *, (glob)
327 "max.count": *, (glob)
328 "max.sys": *, (glob)
328 "max.sys": *, (glob)
329 "max.user": *, (glob)
329 "max.user": *, (glob)
330 "max.wall": *, (glob)
330 "max.wall": *, (glob)
331 "median.comb": *, (glob)
331 "median.comb": *, (glob)
332 "median.count": *, (glob)
332 "median.count": *, (glob)
333 "median.sys": *, (glob)
333 "median.sys": *, (glob)
334 "median.user": *, (glob)
334 "median.user": *, (glob)
335 "median.wall": *, (glob)
335 "median.wall": *, (glob)
336 "sys": *, (glob)
336 "sys": *, (glob)
337 "user": *, (glob)
337 "user": *, (glob)
338 "wall": * (glob)
338 "wall": * (glob)
339 }
339 }
340 ]
340 ]
341
341
342 Test pre-run feature
342 Test pre-run feature
343 --------------------
343 --------------------
344
344
345 (perf discovery has some spurious output)
345 (perf discovery has some spurious output)
346
346
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 ! wall * comb * user * sys * (best of 1) (glob)
348 ! wall * comb * user * sys * (best of 1) (glob)
349 searching for changes
349 searching for changes
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 ! wall * comb * user * sys * (best of 1) (glob)
351 ! wall * comb * user * sys * (best of 1) (glob)
352 searching for changes
352 searching for changes
353 searching for changes
353 searching for changes
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 ! wall * comb * user * sys * (best of 1) (glob)
355 ! wall * comb * user * sys * (best of 1) (glob)
356 searching for changes
356 searching for changes
357 searching for changes
357 searching for changes
358 searching for changes
358 searching for changes
359 searching for changes
359 searching for changes
360
360
361 test profile-benchmark option
361 test profile-benchmark option
362 ------------------------------
362 ------------------------------
363
363
364 Function to check that statprof ran
364 Function to check that statprof ran
365 $ statprofran () {
365 $ statprofran () {
366 > egrep 'Sample count:|No samples recorded' > /dev/null
366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 > }
367 > }
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369
369
370 Check perf.py for historical portability
370 Check perf.py for historical portability
371 ----------------------------------------
371 ----------------------------------------
372
372
373 $ cd "$TESTDIR/.."
373 $ cd "$TESTDIR/.."
374
374
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 contrib/perf.py:\d+: (re)
378 contrib/perf.py:\d+: (re)
379 > from mercurial import (
379 > from mercurial import (
380 import newer module separately in try clause for early Mercurial
380 import newer module separately in try clause for early Mercurial
381 contrib/perf.py:\d+: (re)
381 contrib/perf.py:\d+: (re)
382 > from mercurial import (
382 > from mercurial import (
383 import newer module separately in try clause for early Mercurial
383 import newer module separately in try clause for early Mercurial
384 contrib/perf.py:\d+: (re)
384 contrib/perf.py:\d+: (re)
385 > origindexpath = orig.opener.join(orig.indexfile)
385 > origindexpath = orig.opener.join(orig.indexfile)
386 use getvfs()/getsvfs() for early Mercurial
386 use getvfs()/getsvfs() for early Mercurial
387 contrib/perf.py:\d+: (re)
387 contrib/perf.py:\d+: (re)
388 > origdatapath = orig.opener.join(orig.datafile)
388 > origdatapath = orig.opener.join(orig.datafile)
389 use getvfs()/getsvfs() for early Mercurial
389 use getvfs()/getsvfs() for early Mercurial
390 contrib/perf.py:\d+: (re)
390 contrib/perf.py:\d+: (re)
391 > vfs = vfsmod.vfs(tmpdir)
391 > vfs = vfsmod.vfs(tmpdir)
392 use getvfs()/getsvfs() for early Mercurial
392 use getvfs()/getsvfs() for early Mercurial
393 contrib/perf.py:\d+: (re)
393 contrib/perf.py:\d+: (re)
394 > vfs.options = getattr(orig.opener, 'options', None)
394 > vfs.options = getattr(orig.opener, 'options', None)
395 use getvfs()/getsvfs() for early Mercurial
395 use getvfs()/getsvfs() for early Mercurial
396 [1]
396 [1]
General Comments 0
You need to be logged in to leave comments. Login now