##// END OF EJS Templates
perf: allow profiling of more than one run...
marmoute -
r52482:90ef3e04 default
parent child Browse files
Show More
@@ -1,4689 +1,4705
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (by default, the first iteration is benchmarked)
24
25 ``profiled-runs``
26 list of iterations to profile (starting from 0)
24
27
25 ``run-limits``
28 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
29 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
30 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
31 conditions are considered in order with the following logic:
29
32
30 If benchmark has been running for <time> seconds, and we have performed
33 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
34 <numberofrun> iterations, stop the benchmark,
32
35
33 The default value is: `3.0-100, 10.0-3`
36 The default value is: `3.0-100, 10.0-3`
34
37
35 ``stub``
38 ``stub``
36 When set, benchmarks will only be run once, useful for testing
39 When set, benchmarks will only be run once, useful for testing
37 (default: off)
40 (default: off)
38 '''
41 '''
39
42
40 # "historical portability" policy of perf.py:
43 # "historical portability" policy of perf.py:
41 #
44 #
42 # We have to do:
45 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
46 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
47 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
48 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
49 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
50 # version as possible
48 #
51 #
49 # We have to do, if possible with reasonable cost:
52 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
53 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
54 # with early Mercurial
52 #
55 #
53 # We don't have to do:
56 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
57 # - make perf command for recent feature work correctly with early
55 # Mercurial
58 # Mercurial
56
59
57 import contextlib
60 import contextlib
58 import functools
61 import functools
59 import gc
62 import gc
60 import os
63 import os
61 import random
64 import random
62 import shutil
65 import shutil
63 import struct
66 import struct
64 import sys
67 import sys
65 import tempfile
68 import tempfile
66 import threading
69 import threading
67 import time
70 import time
68
71
69 import mercurial.revlog
72 import mercurial.revlog
70 from mercurial import (
73 from mercurial import (
71 changegroup,
74 changegroup,
72 cmdutil,
75 cmdutil,
73 commands,
76 commands,
74 copies,
77 copies,
75 error,
78 error,
76 extensions,
79 extensions,
77 hg,
80 hg,
78 mdiff,
81 mdiff,
79 merge,
82 merge,
80 util,
83 util,
81 )
84 )
82
85
83 # for "historical portability":
86 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
87 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
88 # failure, because these aren't available with early Mercurial
86 try:
89 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
90 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
91 except ImportError:
89 pass
92 pass
90 try:
93 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
94 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
95 except ImportError:
93 pass
96 pass
94 try:
97 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
98 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
99
97 dir(registrar) # forcibly load it
100 dir(registrar) # forcibly load it
98 except ImportError:
101 except ImportError:
99 registrar = None
102 registrar = None
100 try:
103 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
104 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
105 except ImportError:
103 pass
106 pass
104 try:
107 try:
105 from mercurial.utils import repoviewutil # since 5.0
108 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
109 except ImportError:
107 repoviewutil = None
110 repoviewutil = None
108 try:
111 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
112 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
113 except ImportError:
111 pass
114 pass
112 try:
115 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
116 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
117 except ImportError:
115 pass
118 pass
116
119
117 try:
120 try:
118 from mercurial import profiling
121 from mercurial import profiling
119 except ImportError:
122 except ImportError:
120 profiling = None
123 profiling = None
121
124
122 try:
125 try:
123 from mercurial.revlogutils import constants as revlog_constants
126 from mercurial.revlogutils import constants as revlog_constants
124
127
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
128 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
129
127 def revlog(opener, *args, **kwargs):
130 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
131 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
132
130
133
131 except (ImportError, AttributeError):
134 except (ImportError, AttributeError):
132 perf_rl_kind = None
135 perf_rl_kind = None
133
136
134 def revlog(opener, *args, **kwargs):
137 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
138 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
139
137
140
def identity(a):
    """Return ``a`` unchanged.

    Used as a no-op fallback when pycompat helpers are unavailable on
    old Mercurial versions.
    """
    return a
140
143
141
144
142 try:
145 try:
143 from mercurial import pycompat
146 from mercurial import pycompat
144
147
145 getargspec = pycompat.getargspec # added to module after 4.5
148 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
149 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
150 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
151 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
152 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
153 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
154 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
155 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
156 else:
154 _maxint = sys.maxint
157 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
158 except (NameError, ImportError, AttributeError):
156 import inspect
159 import inspect
157
160
158 getargspec = inspect.getargspec
161 getargspec = inspect.getargspec
159 _byteskwargs = identity
162 _byteskwargs = identity
160 _bytestr = str
163 _bytestr = str
161 fsencode = identity # no py3 support
164 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
165 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
166 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
167 _xrange = xrange
165
168
166 try:
169 try:
167 # 4.7+
170 # 4.7+
168 queue = pycompat.queue.Queue
171 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
172 except (NameError, AttributeError, ImportError):
170 # <4.7.
173 # <4.7.
171 try:
174 try:
172 queue = pycompat.queue
175 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
176 except (NameError, AttributeError, ImportError):
174 import Queue as queue
177 import Queue as queue
175
178
176 try:
179 try:
177 from mercurial import logcmdutil
180 from mercurial import logcmdutil
178
181
179 makelogtemplater = logcmdutil.maketemplater
182 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
181 try:
184 try:
182 makelogtemplater = cmdutil.makelogtemplater
185 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
186 except (AttributeError, ImportError):
184 makelogtemplater = None
187 makelogtemplater = None
185
188
186 # for "historical portability":
189 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
190 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
191 # available since 1.9.3 (or 94b200a11cf7)
# unique sentinel: distinguishes "attribute missing" from any real value
_undefined = object()


def safehasattr(thing, attr):
    """Return True when ``thing`` has attribute ``attr``.

    ``attr`` may be bytes; it is converted with ``_sysstr`` before lookup.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


# force our definition onto util for "historical portability"
# (util.safehasattr has only been available since 1.9.3 / 94b200a11cf7)
setattr(util, 'safehasattr', safehasattr)
197
200
198 # for "historical portability":
201 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
202 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
203 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
204 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
205 util.timer = time.perf_counter
203 elif os.name == b'nt':
206 elif os.name == b'nt':
204 util.timer = time.clock
207 util.timer = time.clock
205 else:
208 else:
206 util.timer = time.time
209 util.timer = time.time
207
210
208 # for "historical portability":
211 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
212 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
213 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
214 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
215 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
216 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
217 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
218 )
216
219
217 # for "historical portability":
220 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
221 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
222 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
223 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
224 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
225 revlogopts = getattr(
223 cmdutil,
226 cmdutil,
224 "debugrevlogopts",
227 "debugrevlogopts",
225 getattr(
228 getattr(
226 commands,
229 commands,
227 "debugrevlogopts",
230 "debugrevlogopts",
228 [
231 [
229 (b'c', b'changelog', False, b'open changelog'),
232 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
233 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
234 (b'', b'dir', False, b'open directory manifest'),
232 ],
235 ],
233 ),
236 ),
234 )
237 )
235
238
236 cmdtable = {}
239 cmdtable = {}
237
240
238
241
239 # for "historical portability":
242 # for "historical portability":
240 # define parsealiases locally, because cmdutil.parsealiases has been
243 # define parsealiases locally, because cmdutil.parsealiases has been
241 # available since 1.5 (or 6252852b4332)
244 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like ``b'name|alias1|alias2'`` into a list.

    Local re-implementation of cmdutil.parsealiases for Mercurial
    versions older than 1.5.
    """
    return cmd.split(b"|")
244
247
245
248
# Pick the best available "@command" decorator for this Mercurial version.
if safehasattr(registrar, 'command'):
    # modern path: registrar.command (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    # cmdutil.command exists since 1.9 (2daa5179e73f)
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)

else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
280
278
281
# Register the perf.* config items so newer Mercurial versions do not
# warn about unknown configuration. Older versions lack the registrar
# machinery entirely (ImportError/AttributeError) and are skipped.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profiled-runs',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (that registrar rejects the experimental= keyword, so re-register
    # everything without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    # BUGFIX: 'profile-benchmark' must stay registered in this branch;
    # the change introducing 'profiled-runs' replaced it here instead of
    # adding the new item alongside it, leaving the still-used
    # perf.profile-benchmark option unregistered on hg 5.2.
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profiled-runs',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365
373
366
374
def getlen(ui):
    """Return the length function benchmarks should use.

    When perf.stub is set, every collection is reported as length 1 so
    test runs stay fast and deterministic; otherwise the builtin ``len``
    is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
371
379
372
380
class noop:
    """dummy context manager

    Stands in for a real profiler context on iterations that are not
    being profiled.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# shared singleton instance; the class is stateless so one is enough
NOOPCTX = noop()
384
392
385
393
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<minimum-run-count>"; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limits = []
    for item in ui.configlist(b"perf", b"run-limits", []):
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    profiled_runs = set()
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            # a factory, not an instance: each profiled iteration gets a
            # fresh profiling context
            def profiler():
                return profiling.profile(ui)

            # experimental config: perf.profiled-runs
            # iteration indexes (0-based) that should be profiled
            for run in ui.configlist(b"perf", b"profiled-runs", [0]):
                profiled_runs.add(int(run))

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
        profiled_runs=profiled_runs,
    )
    return t, fm
508
520
509
521
def stub_timer(fm, func, setup=None, title=None):
    """Run ``func`` exactly once, with no timing.

    Substituted for ``_timer`` when perf.stub is set; ``fm`` and
    ``title`` are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
514
526
515
527
@contextlib.contextmanager
def timeone():
    """Time the ``with`` body.

    Yields a list that, on exit, receives a single
    (wallclock, user-cpu, system-cpu) tuple of elapsed seconds.
    """
    record = []
    os_before = os.times()
    clock_before = util.timer()
    yield record
    clock_after = util.timer()
    os_after = os.times()
    record.append(
        (
            clock_after - clock_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
526
538
527
539
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


@contextlib.contextmanager
def noop_context():
    """Context manager doing nothing; default ``context`` for _timer."""
    yield
538
550
539
551
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
    profiled_runs=(0,),
):
    """Benchmark ``func`` and report the timings through formatter ``fm``.

    ``profiler`` is a zero-argument factory returning a profiling context
    manager; it is invoked for each iteration whose index appears in
    ``profiled_runs``. ``limits`` is a sequence of (elapsed-seconds,
    minimum-run-count) stop conditions.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        # profiling disabled: every run gets the shared no-op context
        def profiler():
            return NOOPCTX

    # warm-up iterations, never measured
    for _ in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        # profile only the iterations the caller asked for
        prof = profiler() if count in profiled_runs else NOOPCTX
        if setup is not None:
            setup()
        with context():
            with prof:
                with timeone() as item:
                    r = func()
        count += 1
        results.append(item[0])
        # Look for a stop condition.
        elapsed = util.timer() - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
582
598
583
599
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's timings through formatter ``fm``.

    ``timings`` is a list of (wall, user, sys) tuples and is sorted in
    place. The best timing is always shown; with ``displayall`` set,
    max, average and median rows are added.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # only non-best rows carry a "<role>." prefix on their fields
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[len(timings) // 2])
616
632
617
633
618 # utilities for historical portability
634 # utilities for historical portability
619
635
620
636
def getint(ui, section, name, default):
    """Read an integer config value, falling back to *default*.

    Re-implements ui.configint for "historical portability": configint has
    only been available since 1.9 (or fa2b596db182).
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
633
649
634
650
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back later
    saved = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return attrutil()
671
687
672
688
673 # utilities to examine each internal API changes
689 # utilities to examine each internal API changes
674
690
675
691
def getbranchmapsubsettable():
    """Locate 'subsettable' across historical module layouts.

    For "historical portability", subsettable has been defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
710
695
711
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability": repo.svfs has been available since
    # 2.3 (or 7034365089bf); older versions exposed repo.sopener instead.
    store_vfs = getattr(repo, 'svfs', None)
    if not store_vfs:
        store_vfs = getattr(repo, 'sopener')
    return store_vfs
705
721
706
722
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability": repo.vfs has been available since
    # 2.3 (or 7034365089bf); older versions exposed repo.opener instead.
    working_vfs = getattr(repo, 'vfs', None)
    if not working_vfs:
        working_vfs = getattr(repo, 'opener')
    return working_vfs
716
732
717
733
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) would be wrong here: existing
        # code paths expect _tagscache to be a structured object, so drop
        # the cached property from the instance dict instead.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
762
747
763
748 # utilities to clear cache
764 # utilities to clear cache
749
765
750
766
def clearfilecache(obj, attrname):
    """Drop *attrname* from obj's filecache and its instance dict."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        # when obj offers unfiltered(), cached values live on that object
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758
774
759
775
def clearchangelog(repo):
    """Invalidate the cached changelog so the next access reloads it."""
    unfiltered = repo.unfiltered()
    if repo is not unfiltered:
        # filtered repoviews keep their own changelog cache key/value pair
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfiltered, 'changelog')
765
781
766
782
767 # perf commands
783 # perf commands
768
784
769
785
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # Benchmark a full dirstate walk over the working directory.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def run():
        walk = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walk))

    timer(run)
    fm.end()
783
799
784
800
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # Benchmark annotating file *f* at the working directory parent.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
792
808
793
809
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # --dirstate: time the low-level dirstate.status() call directly,
        # bypassing the higher-level repo.status() machinery.
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy work is not skipped
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer hg requires status to run inside this context manager
            with dirstate.running_status(repo):
                timer(status_dirstate)
            dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
851
836
852
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove over the whole working copy.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    oldquiet = repo.ui.quiet
    try:
        # silence per-file output while timing
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # modern signature takes a ui path function
            uipathfn = scmutil.getuipathfn(repo)
            timer(
                lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts)
            )
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
870
855
871
def clearcaches(cl):
    """Drop a changelog's in-memory caches across internal API changes."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2 kept a hand-maintained node->rev mapping
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
882
867
883
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # drop cached state so every run recomputes from scratch
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
883
899
884
900
def _default_clear_on_disk_tags_cache(repo):
    """Fallback used when tags.clear_cache_on_disk is unavailable."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
889
905
890
906
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Fallback used when tags.clear_cache_fnodes is unavailable."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
895
911
896
912
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # an all-0xff record marks the entry as missing in the fnodes cache
    blank_prefix = b'\xff' * 4
    blank_fnode = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for rev in revs:
        cache._writeentry(rev * tags._fnodesrecsize, blank_prefix, blank_fnode)
    cache.write()
908
924
909
925
910 @command(
926 @command(
911 b'perf::tags|perftags',
927 b'perf::tags|perftags',
912 formatteropts
928 formatteropts
913 + [
929 + [
914 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
930 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
915 (
931 (
916 b'',
932 b'',
917 b'clear-on-disk-cache',
933 b'clear-on-disk-cache',
918 False,
934 False,
919 b'clear on disk tags cache (DESTRUCTIVE)',
935 b'clear on disk tags cache (DESTRUCTIVE)',
920 ),
936 ),
921 (
937 (
922 b'',
938 b'',
923 b'clear-fnode-cache-all',
939 b'clear-fnode-cache-all',
924 False,
940 False,
925 b'clear on disk file node cache (DESTRUCTIVE),',
941 b'clear on disk file node cache (DESTRUCTIVE),',
926 ),
942 ),
927 (
943 (
928 b'',
944 b'',
929 b'clear-fnode-cache-rev',
945 b'clear-fnode-cache-rev',
930 [],
946 [],
931 b'clear on disk file node cache (DESTRUCTIVE),',
947 b'clear on disk file node cache (DESTRUCTIVE),',
932 b'REVS',
948 b'REVS',
933 ),
949 ),
934 (
950 (
935 b'',
951 b'',
936 b'update-last',
952 b'update-last',
937 b'',
953 b'',
938 b'simulate an update over the last N revisions (DESTRUCTIVE),',
954 b'simulate an update over the last N revisions (DESTRUCTIVE),',
939 b'N',
955 b'N',
940 ),
956 ),
941 ],
957 ],
942 )
958 )
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer the real tags-module helpers when available, falling back to
    # the perf-local reimplementations for older Mercurial versions
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        # --update-last: pretend the last N revisions just arrived by
        # warming a cache for the repo *without* them, then restoring that
        # older cache file before each timed run (see s() below).
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # per-run setup: restore/clear the selected caches so each timed
        # run starts from the requested cache state
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1028
1044
1029
1045
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Benchmark iterating all ancestors reachable from every head.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    head_revs = repo.changelog.headrevs()

    def run():
        for _ancestor in repo.changelog.ancestors(head_revs):
            pass

    timer(run)
    fm.end()
1042
1058
1043
1059
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Benchmark membership tests of REVSET members against the lazy
    # ancestor set spanned by all heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    target_revs = repo.revs(revset)
    head_revs = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(head_revs)
        for rev in target_revs:
            rev in ancestors

    timer(run)
    fm.end()
1058
1074
1059
1075
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional argument means "REV" (with -c/-m selecting the revlog);
    # two mean "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that the revlog would have been handed when
    # this (already stored) revision was originally added
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1123
1139
1124
1140
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve PATH with whichever helper this Mercurial version provides,
    # newest API first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def setup():
        # open a fresh peer for every run
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup)
    fm.end()
1151
1167
1152
1168
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # force the next access to re-parse the bookmarks file
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1177
1193
1178
1194
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # `parsebundlespec` moved between modules across Mercurial versions;
    # prefer the modern location and fall back to the historical one.
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed grammar of the error message ("not" -> "no")
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing object the bundling code expects: heads to
    # bundle, and the heads of everything below the selected set
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only "none" compression is supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # timed payload: write the bundle to /dev/null so only generation
        # cost is measured, not disk I/O of the output
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1282
1298
1283
1299
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # wrap ``fn`` so each timed run re-opens and re-parses the bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark reading the whole bundle payload ``size`` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # read every bundle2 part ``size`` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open the bundle once up-front only to detect its format and pick the
    # matching set of benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    # each benchmark gets its own timer/formatter pair so results are
    # reported per title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1408
1424
1409
1425
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the revset to nodes up-front so lookup cost is not timed
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # timed payload: generate and fully consume the changelog chunks
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1445
1461
1446
1462
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call, rebuilding the `dirs` cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its initial load is not part of the measurement
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the dirs cache inside the timed run so the next iteration
        # rebuilds it; some dirstate implementations have no such attribute
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1463
1479
1464
1480
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so the initial load is not part of the measurement
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # benchmark iterating over every tracked file
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # benchmark membership tests, with both hits and guaranteed misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: benchmark a cold load up to the first "contains" answer

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1527
1543
1528
1544
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate itself; only the dirs cache is dropped per run
    repo.dirstate.hasdir(b"a")

    def setup():
        # untimed: drop the dirs cache so each run rebuilds it; some
        # dirstate implementations have no such attribute
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1547
1563
1548
1564
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its initial load is not part of the measurement
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # untimed: drop the cached filefoldmap so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1568
1584
1569
1585
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its initial load is not part of the measurement
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # untimed: drop both the dirfoldmap and the underlying dirs cache
        # so each run rebuilds them; some implementations lack `_dirs`
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1593
1609
1594
1610
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so its initial load is not part of the measurement
    b"a" in ds

    def setup():
        # untimed: mark the dirstate dirty so write() actually writes
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # hold the wlock for the whole benchmark, as real writers do
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1612
1628
1613
1629
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        # explicit merge base requested; otherwise use the common ancestor
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1635
1651
1636
1652
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """measure runtime of `merge.calculateupdates`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1668
1684
1669
1685
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1692
1708
1693
1709
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    old_ctx = scmutil.revsingle(repo, rev1, rev1)
    new_ctx = scmutil.revsingle(repo, rev2, rev2)

    def run_pathcopies():
        # timed payload: trace copies between the two changectx
        copies.pathcopies(old_ctx, new_ctx)

    timer(run_pathcopies)
    fm.end()
1707
1723
1708
1724
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ]
    + formatteropts,
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def d():
        phases = _phases
        if full:
            # also drop the on-disk-backed filecache so file reading is
            # included in the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        # asking for any phase forces the phase sets to be recomputed
        phases.phase(repo, tip_rev)

    timer(d)
    fm.end()
1735
1751
1736
1752
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial path API exposes `main_path`/`get_push_variant`;
    # fall back to the legacy `pushloc` attribute otherwise
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # `has_node` only exists on newer changelog indexes; fall back to the
    # nodemap containment check on older versions
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # timed payload: summarize the remote phase roots locally
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1799
1815
1800
1816
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; benchmark that changeset's manifest
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        # REV identifies a manifest revision directly: either a full 40-char
        # hex node or an integer manifest revision number
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage() only exists on newer manifestlog
                # implementations; fall back to the private revlog otherwise
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # clear in-memory (and, with --clear-disk, persisted) caches so each
        # timed run reads the manifest cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1844
1860
1845
1861
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # time reading a single changelog entry, addressed by node
    byteopts = _byteskwargs(opts)
    timer, fm = gettimer(ui, byteopts)
    node = scmutil.revsingle(repo, rev).node()

    def read_entry():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(read_entry)
    fm.end()
1858
1874
1859
1875
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset_state():
        # drop both the in-memory dirstate and its cached ignore matcher so
        # every timed run rebuilds the matcher from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load_ignore():
        dirstate._ignore

    timer(load_ignore, setup=reset_state, title=b"load")
    fm.end()
1876
1892
1877
1893
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # NOTE: _byteskwargs converts all option keys to bytes, so the key
        # must be b'rev'; the previous str key raised KeyError whenever
        # --no-lookup was used on Python 3.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1940
1956
1941
1957
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # newer indexes expose get_rev directly; older ones only provide the
        # nodemap mapping
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild a cold nodemap before every timed run

        def setup():
            setnodeget()

    else:
        # warm path: build the nodemap once and prime it before timing
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
2012
2028
2013
2029
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # measure how long a bare `hg version -q` takes (interpreter plus
    # mercurial startup), with hgrc loading neutralised
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run_hg_version():
        if os.name == 'nt':
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])
        else:
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )

    timer(run_hg_version)
    fm.end()
2030
2046
2031
2047
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    Returns a callable taking a repository and returning an iterable of
    stream-clone chunks, or raises error.Abort when the requested version is
    unknown or not provided by this Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            entries, bytes, data = generatev1(repo, None, None, True)
            return data

        # register the wrapper; the original code registered the raw
        # generatev1 (4-argument signature), leaving the wrapper unused and
        # making the v1 entry incompatible with the v2/v3 ones
        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            return generatev3(repo, None, None, True)

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2078
2094
2079
2095
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # keep the produced generator in a one-slot holder so its deletion
    # (which may trigger cleanup work) happens outside the timed section
    holder = [None]

    def reset_holder():
        holder[0] = None

    generate = _find_stream_generator(stream_version)

    def scan():
        # the lock is held for the duration the initialisation
        holder[0] = generate(repo)

    timer(scan, setup=reset_holder, title=b"load")
    fm.end()
2114
2130
2115
2131
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to us ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    generate = _find_stream_generator(stream_version)

    def drain():
        # exhaust the whole stream; the repo lock is held while the
        # generator initialises
        for _chunk in generate(repo):
            pass

    timer(drain, title=b"generate")
    fm.end()
2147
2163
2148
2164
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # slot 0: the open bundle file, slot 1: the temporary target directory;
    # both are (re)filled by the context manager around each timed run
    run_variables = [None, None]

    # we create the new repository next to the other one for two reasons:
    # - this way we use the same file system, which are relevant for benchmark
    # - if /tmp/ is small, the operation could overfills it.
    source_repo_dir = os.path.dirname(repo.root)

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory(
                prefix=b'hg-perf-stream-consume-',
                dir=source_repo_dir,
            ) as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]

        # we actually wants to copy all config to ensure the repo config is
        # taken in account during the benchmark
        new_ui = repo.ui.__class__(repo.ui)
        # only pass ui when no srcrepo
        localrepo.createrepository(
            new_ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(new_ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2238
2254
2239
2255
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    node_list = [repo.changelog.node(i) for i in _xrange(count)]

    def fetch_parents():
        changelog = repo.changelog
        for node in node_list:
            changelog.parents(node)

    timer(fetch_parents)
    fm.end()
2265
2281
2266
2282
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # time listing the files touched by changeset `x` through the context API
    opts = _byteskwargs(opts)
    target_rev = int(x)
    timer, fm = gettimer(ui, opts)

    def list_files():
        len(repo[target_rev].files())

    timer(list_files)
    fm.end()
2278
2294
2279
2295
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # time listing the files touched by changeset `x` straight from the raw
    # changelog entry (field 3), bypassing the context layer
    opts = _byteskwargs(opts)
    target_rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def list_files():
        len(changelog.read(target_rev)[3])

    timer(list_files)
    fm.end()
2292
2308
2293
2309
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # time resolving a user-supplied revision symbol to its node
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
2300
2316
2301
2317
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    # benchmark replaying a fixed pseudo-random edit sequence into a fresh
    # linelog
    from mercurial import linelog

    opts = _byteskwargs(opts)

    num_edits = opts[b'edits']
    max_hunk_lines = opts[b'max_hunk_lines']

    max_b1 = 100000
    # fixed seed: every run replays the exact same edit sequence; the order
    # of the randint calls below must not change
    random.seed(0)
    randint = random.randint
    line_count = 0
    hunks = []
    for rev in _xrange(num_edits):
        a1 = randint(0, line_count)
        a2 = randint(a1, min(line_count, a1 + max_hunk_lines))
        b1 = randint(0, max_b1)
        b2 = randint(b1, b1 + max_hunk_lines)
        line_count += (b2 - b1) - (a2 - a1)
        hunks.append((rev, a1, a2, b1, b2))

    def apply_edits():
        ll = linelog.linelog()
        for hunk in hunks:
            ll.replacelines(*hunk)

    timer, fm = gettimer(ui, opts)
    timer(apply_edits)
    fm.end()
2339
2355
2340
2356
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # time resolving a set of revset specs into a revision range
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
2348
2364
2349
2365
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node id to a revision number"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    try:
        # modern API: the revlog is addressed by its path radix
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        # older API wanted an explicit index file name
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def lookup():
        cl.rev(node)
        # drop caches so every iteration performs a cold lookup
        clearcaches(cl)

    timer(lookup)
    fm.end()
2370
2386
2371
2387
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the `log` command (output is buffered away)"""
    opts = _byteskwargs(opts)
    revs = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # buffer the output so terminal speed does not pollute the timing
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=revs, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
2389
2405
2390
2406
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        lastrev = len(repo) - 1
        for r in repo.changelog.revs(start=lastrev, stop=-1):
            # reading the branch forces the changelog entry itself to be
            # loaded, not just the index
            repo[r].branch()

    timer(moonwalk)
    fm.end()
2407
2423
2408
2424
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template

    Renders ``testedtemplate`` (or a default log-style template) for the
    selected revisions, sending all output to ``os.devnull`` so only the
    templating work is measured.
    """
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # discard all rendered output; the handle is closed in the finally
    # block below (it used to be leaked)
    nullui.fout = open(os.devnull, 'wb')
    try:
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        nullui.fout.close()
2451
2467
2452
2468
2453 def _displaystats(ui, opts, entries, data):
2469 def _displaystats(ui, opts, entries, data):
2454 # use a second formatter because the data are quite different, not sure
2470 # use a second formatter because the data are quite different, not sure
2455 # how it flies with the templater.
2471 # how it flies with the templater.
2456 fm = ui.formatter(b'perf-stats', opts)
2472 fm = ui.formatter(b'perf-stats', opts)
2457 for key, title in entries:
2473 for key, title in entries:
2458 values = data[key]
2474 values = data[key]
2459 nbvalues = len(data)
2475 nbvalues = len(data)
2460 values.sort()
2476 values.sort()
2461 stats = {
2477 stats = {
2462 'key': key,
2478 'key': key,
2463 'title': title,
2479 'title': title,
2464 'nbitems': len(values),
2480 'nbitems': len(values),
2465 'min': values[0][0],
2481 'min': values[0][0],
2466 '10%': values[(nbvalues * 10) // 100][0],
2482 '10%': values[(nbvalues * 10) // 100][0],
2467 '25%': values[(nbvalues * 25) // 100][0],
2483 '25%': values[(nbvalues * 25) // 100][0],
2468 '50%': values[(nbvalues * 50) // 100][0],
2484 '50%': values[(nbvalues * 50) // 100][0],
2469 '75%': values[(nbvalues * 75) // 100][0],
2485 '75%': values[(nbvalues * 75) // 100][0],
2470 '80%': values[(nbvalues * 80) // 100][0],
2486 '80%': values[(nbvalues * 80) // 100][0],
2471 '85%': values[(nbvalues * 85) // 100][0],
2487 '85%': values[(nbvalues * 85) // 100][0],
2472 '90%': values[(nbvalues * 90) // 100][0],
2488 '90%': values[(nbvalues * 90) // 100][0],
2473 '95%': values[(nbvalues * 95) // 100][0],
2489 '95%': values[(nbvalues * 95) // 100][0],
2474 '99%': values[(nbvalues * 99) // 100][0],
2490 '99%': values[(nbvalues * 99) // 100][0],
2475 'max': values[-1][0],
2491 'max': values[-1][0],
2476 }
2492 }
2477 fm.startitem()
2493 fm.startitem()
2478 fm.data(**stats)
2494 fm.data(**stats)
2479 # make node pretty for the human output
2495 # make node pretty for the human output
2480 fm.plain('### %s (%d items)\n' % (title, len(values)))
2496 fm.plain('### %s (%d items)\n' % (title, len(values)))
2481 lines = [
2497 lines = [
2482 'min',
2498 'min',
2483 '10%',
2499 '10%',
2484 '25%',
2500 '25%',
2485 '50%',
2501 '50%',
2486 '75%',
2502 '75%',
2487 '80%',
2503 '80%',
2488 '85%',
2504 '85%',
2489 '90%',
2505 '90%',
2490 '95%',
2506 '95%',
2491 '99%',
2507 '99%',
2492 'max',
2508 'max',
2493 ]
2509 ]
2494 for l in lines:
2510 for l in lines:
2495 fm.plain('%s: %s\n' % (l, stats[l]))
2511 fm.plain('%s: %s\n' % (l, stats[l]))
2496 fm.end()
2512 fm.end()
2497
2513
2498
2514
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column name -> printf-style format for the tabular output
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing/rename columns are only filled when --timing is set
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # per-metric lists of (value, identifying hexes) samples for
        # _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits within the requested revisions are of interest
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        # each common-ancestor head is a candidate merge base
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2680
2696
2681
2697
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the tabular layout gains rename/time columns when --timing is set
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # per-metric sample lists for the final _displaystats report
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits within the requested revisions are of interest
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        # each common-ancestor head yields a (base, parent) pair to measure
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2820
2836
2821
2837
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build)
    fm.end()
2828
2844
2829
2845
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2841
2857
2842
2858
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    The fncache is marked dirty and rewritten inside a transaction; a
    backup of the file is registered so an aborted transaction restores it.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # robustness fix: release the transaction and the lock even when the
    # benchmark raises (the original left both dangling on error)
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()
    fm.end()
2861
2877
2862
2878
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once outside the timed section
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2876
2892
2877
2893
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded perfbdiff benchmark.
    #
    # ``q`` feeds (text1, text2) pairs; a ``None`` item marks the end of a
    # batch.  ``xdiff``/``blocks`` select which diff routine to exercise.
    # After draining a batch the worker parks on the ``ready`` condition
    # until woken for the next round; ``done`` being set makes it exit.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2893
2909
2894
2910
def _manifestrevision(repo, mnode):
    """Return the raw revision data for manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # modern Mercurial exposes getstorage(); fall back to the private
    # revlog attribute on older versions
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
2904
2920
2905
2921
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff only makes sense for the block-based API
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather every (old, new) text pair up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        # wait for every worker to consume its startup sentinel and park
        q.join()

        def d():
            # feed all pairs plus one terminating sentinel per worker, wake
            # the parked workers, then wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut workers down: raise the done flag, give each one a final
        # sentinel to consume, and wake them from the ready condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3020
3036
3021
3037
@command(
    b'perf::unbundle',
    [
        (b'', b'as-push', None, b'pretend the bundle comes from a push'),
    ]
    + formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The --as-push option make the unbundle operation appears like it comes from
    a client push. It change some aspect of the processing and associated
    performance profile.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    unbundle_source = b'perf::unbundle'
    if opts[b'as_push']:
        unbundle_source = b'push'

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [bundle-generator, transaction]; kept in a list so
            # the setup/apply closures can replace its content in place
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run, then re-read the bundle from the start
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=unbundle_source,
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this line previously used `==` (a no-op comparison)
                # instead of `=`, so the original quiet level was never
                # restored after the benchmark
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3113
3129
3114
3130
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather every (left, right) text pair up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3193
3209
3194
3210
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    flag_to_option = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark each whitespace-handling combination separately
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {flag_to_option[flag]: b'1' for flag in flags}

        def run(kwargs=diff_kwargs):
            # swallow the diff output so only the work itself is measured
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        title = b'diffopts: %s' % (encoded and (b'-' + encoded) or b'none')
        timer(run, title=title)
    fm.end()
3218
3234
3219
3235
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # the low 16 bits of the header carry the revlog version; only v1 is
    # supported here, with the inline flag stored in bit 16
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8 exposed index parsing through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # benchmark building the revlog object itself
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # benchmark raw index file reading
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        # a missing node raises RevlogError; swallow it since the lookup
        # cost is what is being measured
        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    # each micro-benchmark gets its own timer/formatter pair
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3365
3381
3366
3382
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    total = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = total + startrev

    def read_series():
        rl.clearcaches()

        first = startrev
        last = total
        step = opts[b'dist']

        if reverse:
            # walk from tip-side back toward the start revision
            first, last = last - 1, first - 1
            step = -1 * step

        for cur in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(cur))

    timer, fm = gettimer(ui, opts)
    timer(read_series)
    fm.end()
3415
3431
3416
3432
3417 @command(
3433 @command(
3418 b'perf::revlogwrite|perfrevlogwrite',
3434 b'perf::revlogwrite|perfrevlogwrite',
3419 revlogopts
3435 revlogopts
3420 + formatteropts
3436 + formatteropts
3421 + [
3437 + [
3422 (b's', b'startrev', 1000, b'revision to start writing at'),
3438 (b's', b'startrev', 1000, b'revision to start writing at'),
3423 (b'', b'stoprev', -1, b'last revision to write'),
3439 (b'', b'stoprev', -1, b'last revision to write'),
3424 (b'', b'count', 3, b'number of passes to perform'),
3440 (b'', b'count', 3, b'number of passes to perform'),
3425 (b'', b'details', False, b'print timing for every revisions tested'),
3441 (b'', b'details', False, b'print timing for every revisions tested'),
3426 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3442 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3427 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3443 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3428 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3444 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3429 ],
3445 ],
3430 b'-c|-m|FILE',
3446 b'-c|-m|FILE',
3431 )
3447 )
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed typo: the message used to read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-run-0, timing-from-run-1, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUGFIX: this entry used to compute `resultcount * 70 // 100`,
        # reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3558
3574
3559
3575
3560 class _faketr:
3576 class _faketr:
3561 def add(s, x, y, z=None):
3577 def add(s, x, y, z=None):
3562 return None
3578 return None
3563
3579
3564
3580
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay one full write pass of ``orig`` into a scratch revlog.

    Every revision in ``orig`` between ``startrev`` and ``stoprev`` is
    re-added to a temporary copy (seeded from ``source``), timing each
    ``addrawrevision`` call individually.

    Returns a list of ``(rev, timing)`` pairs, one per rewritten revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # toggle lazy delta-base reuse on both new- and old-style revlogs
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            # bind the methods directly instead of defining wrappers
            updateprogress = progress.update
            completeprogress = progress.complete
        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3617
3633
3618
3634
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for ``rev``.

    Depending on ``source``, the revision content is provided either as a
    full text (``text``) or as a precomputed delta (``cachedelta``) against
    some base revision of ``orig``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # strictly smaller only: on a tie we keep p1
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3659
3675
3660
3676
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable temporary copy of ``orig`` truncated at ``truncaterev``.

    The index and data files of ``orig`` are copied into a fresh temporary
    directory, then truncated so that every revision >= ``truncaterev`` is
    removed; a new revlog is opened on that copy and yielded. The temporary
    directory is removed on exit.

    Inline revlogs are rejected because index and data share one file,
    which the copy/truncate logic below cannot handle.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward 'upperboundcomp' only when the source revlog supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # modern revlogs use '_datafile'; fall back to the pre-5.9 attribute
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so the cut point is a multiple
            # of the entry size
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern constructor takes a 'radix'; older ones want explicit
            # index/data file names, hence the TypeError fallback
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3721
3737
3722
3738
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        # yield a file handle when the (old) API needs one, None otherwise
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # BUGFIX: this used to read getattr(rl, 'datafile', getattr(rl,
            # 'datafile')) - looking up the same attribute twice. The first
            # lookup must be the modern '_datafile', with the pre-5.9
            # 'datafile' as fallback (same pattern as _temprevlog).
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3898
3914
3899
3915
3900 @command(
3916 @command(
3901 b'perf::revlogrevision|perfrevlogrevision',
3917 b'perf::revlogrevision|perfrevlogrevision',
3902 revlogopts
3918 revlogopts
3903 + formatteropts
3919 + formatteropts
3904 + [(b'', b'cache', False, b'use caches instead of clearing')],
3920 + [(b'', b'cache', False, b'use caches instead of clearing')],
3905 b'-c|-m|FILE REV',
3921 b'-c|-m|FILE REV',
3906 )
3922 )
3907 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3923 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3908 """Benchmark obtaining a revlog revision.
3924 """Benchmark obtaining a revlog revision.
3909
3925
3910 Obtaining a revlog revision consists of roughly the following steps:
3926 Obtaining a revlog revision consists of roughly the following steps:
3911
3927
3912 1. Compute the delta chain
3928 1. Compute the delta chain
3913 2. Slice the delta chain if applicable
3929 2. Slice the delta chain if applicable
3914 3. Obtain the raw chunks for that delta chain
3930 3. Obtain the raw chunks for that delta chain
3915 4. Decompress each raw chunk
3931 4. Decompress each raw chunk
3916 5. Apply binary patches to obtain fulltext
3932 5. Apply binary patches to obtain fulltext
3917 6. Verify hash of fulltext
3933 6. Verify hash of fulltext
3918
3934
3919 This command measures the time spent in each of these phases.
3935 This command measures the time spent in each of these phases.
3920 """
3936 """
3921 opts = _byteskwargs(opts)
3937 opts = _byteskwargs(opts)
3922
3938
3923 if opts.get(b'changelog') or opts.get(b'manifest'):
3939 if opts.get(b'changelog') or opts.get(b'manifest'):
3924 file_, rev = None, file_
3940 file_, rev = None, file_
3925 elif rev is None:
3941 elif rev is None:
3926 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3942 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3927
3943
3928 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3944 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3929
3945
3930 # _chunkraw was renamed to _getsegmentforrevs.
3946 # _chunkraw was renamed to _getsegmentforrevs.
3931 try:
3947 try:
3932 segmentforrevs = r._inner.get_segment_for_revs
3948 segmentforrevs = r._inner.get_segment_for_revs
3933 except AttributeError:
3949 except AttributeError:
3934 try:
3950 try:
3935 segmentforrevs = r._getsegmentforrevs
3951 segmentforrevs = r._getsegmentforrevs
3936 except AttributeError:
3952 except AttributeError:
3937 segmentforrevs = r._chunkraw
3953 segmentforrevs = r._chunkraw
3938
3954
3939 node = r.lookup(rev)
3955 node = r.lookup(rev)
3940 rev = r.rev(node)
3956 rev = r.rev(node)
3941
3957
3942 if getattr(r, 'reading', None) is not None:
3958 if getattr(r, 'reading', None) is not None:
3943
3959
3944 @contextlib.contextmanager
3960 @contextlib.contextmanager
3945 def lazy_reading(r):
3961 def lazy_reading(r):
3946 with r.reading():
3962 with r.reading():
3947 yield
3963 yield
3948
3964
3949 else:
3965 else:
3950
3966
3951 @contextlib.contextmanager
3967 @contextlib.contextmanager
3952 def lazy_reading(r):
3968 def lazy_reading(r):
3953 yield
3969 yield
3954
3970
3955 def getrawchunks(data, chain):
3971 def getrawchunks(data, chain):
3956 start = r.start
3972 start = r.start
3957 length = r.length
3973 length = r.length
3958 inline = r._inline
3974 inline = r._inline
3959 try:
3975 try:
3960 iosize = r.index.entry_size
3976 iosize = r.index.entry_size
3961 except AttributeError:
3977 except AttributeError:
3962 iosize = r._io.size
3978 iosize = r._io.size
3963 buffer = util.buffer
3979 buffer = util.buffer
3964
3980
3965 chunks = []
3981 chunks = []
3966 ladd = chunks.append
3982 ladd = chunks.append
3967 for idx, item in enumerate(chain):
3983 for idx, item in enumerate(chain):
3968 offset = start(item[0])
3984 offset = start(item[0])
3969 bits = data[idx]
3985 bits = data[idx]
3970 for rev in item:
3986 for rev in item:
3971 chunkstart = start(rev)
3987 chunkstart = start(rev)
3972 if inline:
3988 if inline:
3973 chunkstart += (rev + 1) * iosize
3989 chunkstart += (rev + 1) * iosize
3974 chunklength = length(rev)
3990 chunklength = length(rev)
3975 ladd(buffer(bits, chunkstart - offset, chunklength))
3991 ladd(buffer(bits, chunkstart - offset, chunklength))
3976
3992
3977 return chunks
3993 return chunks
3978
3994
3979 def dodeltachain(rev):
3995 def dodeltachain(rev):
3980 if not cache:
3996 if not cache:
3981 r.clearcaches()
3997 r.clearcaches()
3982 r._deltachain(rev)
3998 r._deltachain(rev)
3983
3999
3984 def doread(chain):
4000 def doread(chain):
3985 if not cache:
4001 if not cache:
3986 r.clearcaches()
4002 r.clearcaches()
3987 for item in slicedchain:
4003 for item in slicedchain:
3988 with lazy_reading(r):
4004 with lazy_reading(r):
3989 segmentforrevs(item[0], item[-1])
4005 segmentforrevs(item[0], item[-1])
3990
4006
3991 def doslice(r, chain, size):
4007 def doslice(r, chain, size):
3992 for s in slicechunk(r, chain, targetsize=size):
4008 for s in slicechunk(r, chain, targetsize=size):
3993 pass
4009 pass
3994
4010
3995 def dorawchunks(data, chain):
4011 def dorawchunks(data, chain):
3996 if not cache:
4012 if not cache:
3997 r.clearcaches()
4013 r.clearcaches()
3998 getrawchunks(data, chain)
4014 getrawchunks(data, chain)
3999
4015
4000 def dodecompress(chunks):
4016 def dodecompress(chunks):
4001 decomp = r.decompress
4017 decomp = r.decompress
4002 for chunk in chunks:
4018 for chunk in chunks:
4003 decomp(chunk)
4019 decomp(chunk)
4004
4020
4005 def dopatch(text, bins):
4021 def dopatch(text, bins):
4006 if not cache:
4022 if not cache:
4007 r.clearcaches()
4023 r.clearcaches()
4008 mdiff.patches(text, bins)
4024 mdiff.patches(text, bins)
4009
4025
4010 def dohash(text):
4026 def dohash(text):
4011 if not cache:
4027 if not cache:
4012 r.clearcaches()
4028 r.clearcaches()
4013 r.checkhash(text, node, rev=rev)
4029 r.checkhash(text, node, rev=rev)
4014
4030
4015 def dorevision():
4031 def dorevision():
4016 if not cache:
4032 if not cache:
4017 r.clearcaches()
4033 r.clearcaches()
4018 r.revision(node)
4034 r.revision(node)
4019
4035
4020 try:
4036 try:
4021 from mercurial.revlogutils.deltas import slicechunk
4037 from mercurial.revlogutils.deltas import slicechunk
4022 except ImportError:
4038 except ImportError:
4023 slicechunk = getattr(revlog, '_slicechunk', None)
4039 slicechunk = getattr(revlog, '_slicechunk', None)
4024
4040
4025 size = r.length(rev)
4041 size = r.length(rev)
4026 chain = r._deltachain(rev)[0]
4042 chain = r._deltachain(rev)[0]
4027
4043
4028 with_sparse_read = False
4044 with_sparse_read = False
4029 if hasattr(r, 'data_config'):
4045 if hasattr(r, 'data_config'):
4030 with_sparse_read = r.data_config.with_sparse_read
4046 with_sparse_read = r.data_config.with_sparse_read
4031 elif hasattr(r, '_withsparseread'):
4047 elif hasattr(r, '_withsparseread'):
4032 with_sparse_read = r._withsparseread
4048 with_sparse_read = r._withsparseread
4033 if with_sparse_read:
4049 if with_sparse_read:
4034 slicedchain = (chain,)
4050 slicedchain = (chain,)
4035 else:
4051 else:
4036 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4052 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4037 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4053 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4038 rawchunks = getrawchunks(data, slicedchain)
4054 rawchunks = getrawchunks(data, slicedchain)
4039 bins = r._inner._chunks(chain)
4055 bins = r._inner._chunks(chain)
4040 text = bytes(bins[0])
4056 text = bytes(bins[0])
4041 bins = bins[1:]
4057 bins = bins[1:]
4042 text = mdiff.patches(text, bins)
4058 text = mdiff.patches(text, bins)
4043
4059
4044 benches = [
4060 benches = [
4045 (lambda: dorevision(), b'full'),
4061 (lambda: dorevision(), b'full'),
4046 (lambda: dodeltachain(rev), b'deltachain'),
4062 (lambda: dodeltachain(rev), b'deltachain'),
4047 (lambda: doread(chain), b'read'),
4063 (lambda: doread(chain), b'read'),
4048 ]
4064 ]
4049
4065
4050 if with_sparse_read:
4066 if with_sparse_read:
4051 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4067 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4052 benches.append(slicing)
4068 benches.append(slicing)
4053
4069
4054 benches.extend(
4070 benches.extend(
4055 [
4071 [
4056 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4072 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4057 (lambda: dodecompress(rawchunks), b'decompress'),
4073 (lambda: dodecompress(rawchunks), b'decompress'),
4058 (lambda: dopatch(text, bins), b'patch'),
4074 (lambda: dopatch(text, bins), b'patch'),
4059 (lambda: dohash(text), b'hash'),
4075 (lambda: dohash(text), b'hash'),
4060 ]
4076 ]
4061 )
4077 )
4062
4078
4063 timer, fm = gettimer(ui, opts)
4079 timer, fm = gettimer(ui, opts)
4064 for fn, title in benches:
4080 for fn, title in benches:
4065 timer(fn, title=title)
4081 timer(fn, title=title)
4066 fm.end()
4082 fm.end()
4067
4083
4068
4084
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. Volatile caches hold
    filtered and obsolescence related data."""
    # NOTE: the docstring previously referred to a nonexistent "--clean"
    # option; the flag actually registered above is -C/--clear.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop filtered/obsolete caches so each run recomputes them
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx per revision (heavier than bare revs)
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
4100
4116
4101
4117
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute):
        """build a timed closure invalidating caches before ``compute``."""

        def bench(target):
            def run():
                repo.invalidatevolatilesets()
                if opts[b'clear_obsstore']:
                    clearfilecache(repo, b'obsstore')
                compute(repo, target)

            return run

        return bench

    def selected(candidates):
        # honor an explicit positional selection, keep sorted order otherwise
        ordered = sorted(candidates)
        if not names:
            return ordered
        return [entry for entry in ordered if entry in names]

    # obsolescence-related sets (obsolete, unstable, ...)
    obsbench = makebench(obsolete.getrevs)
    for setname in selected(obsolete.cachefuncs):
        timer(obsbench(setname), title=setname)

    # repoview filter sets (visible, served, ...)
    filterbench = makebench(repoview.filterrevs)
    for filtername in selected(repoview.filtertable):
        timer(filterbench(filtername), title=filtername)
    fm.end()
4149
4165
4150
4166
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # only evict this filter's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    # (each filter's branchmap can be seeded from its subset's, so order
    # benchmarks from the innermost subset outward)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                # `name` has no pending subset left: safe to schedule next
                break
        else:
            # every remaining filter depends on another remaining one
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk reading of the branchmap so the benchmark measures the
    # in-memory update only.  Three API generations are supported:
    #   1. module-level branch_cache_from_file (modern)
    #   2. branchcache.fromfile classmethod
    #   3. module-level branchmap.read (oldest)
    old_branch_cache_from_file = None
    branchcacheread = None
    if util.safehasattr(branchmap, 'branch_cache_from_file'):
        old_branch_cache_from_file = branchmap.branch_cache_from_file
        branchmap.branch_cache_from_file = lambda *args: None
    elif util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    # likewise disable writing the result back to disk
    if util.safehasattr(branchmap, '_LocalBranchCache'):
        branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
        branchcachewrite.set(lambda *args: None)
    else:
        branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
        branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write entry points
        if old_branch_cache_from_file is not None:
            branchmap.branch_cache_from_file = old_branch_cache_from_file
        if branchcacheread is not None:
            branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4252
4268
4253
4269
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary filters exposing exactly the base/target subsets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        bcache = repo.branchmap()
        copy_method = 'copy'

        # Default to argument-less copies; older branchcache.copy() takes no
        # `repo` argument.  Both dicts must be initialized here: previously
        # `copy_target_kwargs` was left undefined on that code path (and
        # `copy_base_kwargs` was assigned twice by a typo), which made
        # setup() fail with a NameError on such versions.
        copy_base_kwargs = {}
        copy_target_kwargs = {}
        if hasattr(bcache, 'copy'):
            if 'repo' in getargspec(bcache.copy).args:
                copy_base_kwargs = {"repo": baserepo}
                copy_target_kwargs = {"repo": targetrepo}
        else:
            copy_method = 'inherit_for'
            copy_base_kwargs = {"repo": baserepo}
            copy_target_kwargs = {"repo": targetrepo}

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = getattr(base, copy_method)(**copy_target_kwargs)
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4375
4391
4376
4392
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed typo: was "List brachmap filter caches"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # resolve the reader entry point across Mercurial versions, newest first
    fromfile = getattr(branchmap, 'branch_cache_from_file', None)
    if fromfile is None:
        fromfile = getattr(branchmap.branchcache, 'fromfile', None)
    if fromfile is None:
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the nearest subset that does have a cache on disk
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4435
4451
4436
4452
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def count_markers():
        # instantiating obsstore parses every on-disk marker
        return len(obsolete.obsstore(repo, store_vfs))

    timer(count_markers)
    fm.end()
4446
4462
4447
4463
4448 @command(
4464 @command(
4449 b'perf::lrucachedict|perflrucachedict',
4465 b'perf::lrucachedict|perflrucachedict',
4450 formatteropts
4466 formatteropts
4451 + [
4467 + [
4452 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4468 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4453 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4469 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4454 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4470 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4455 (b'', b'size', 4, b'size of cache'),
4471 (b'', b'size', 4, b'size of cache'),
4456 (b'', b'gets', 10000, b'number of key lookups'),
4472 (b'', b'gets', 10000, b'number of key lookups'),
4457 (b'', b'sets', 10000, b'number of key sets'),
4473 (b'', b'sets', 10000, b'number of key sets'),
4458 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4474 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4459 (
4475 (
4460 b'',
4476 b'',
4461 b'mixedgetfreq',
4477 b'mixedgetfreq',
4462 50,
4478 50,
4463 b'frequency of get vs set ops in mixed mode',
4479 b'frequency of get vs set ops in mixed mode',
4464 ),
4480 ),
4465 ],
4481 ],
4466 norepo=True,
4482 norepo=True,
4467 )
4483 )
4468 def perflrucache(
4484 def perflrucache(
4469 ui,
4485 ui,
4470 mincost=0,
4486 mincost=0,
4471 maxcost=100,
4487 maxcost=100,
4472 costlimit=0,
4488 costlimit=0,
4473 size=4,
4489 size=4,
4474 gets=10000,
4490 gets=10000,
4475 sets=10000,
4491 sets=10000,
4476 mixed=10000,
4492 mixed=10000,
4477 mixedgetfreq=50,
4493 mixedgetfreq=50,
4478 **opts
4494 **opts
4479 ):
4495 ):
4480 opts = _byteskwargs(opts)
4496 opts = _byteskwargs(opts)
4481
4497
4482 def doinit():
4498 def doinit():
4483 for i in _xrange(10000):
4499 for i in _xrange(10000):
4484 util.lrucachedict(size)
4500 util.lrucachedict(size)
4485
4501
4486 costrange = list(range(mincost, maxcost + 1))
4502 costrange = list(range(mincost, maxcost + 1))
4487
4503
4488 values = []
4504 values = []
4489 for i in _xrange(size):
4505 for i in _xrange(size):
4490 values.append(random.randint(0, _maxint))
4506 values.append(random.randint(0, _maxint))
4491
4507
4492 # Get mode fills the cache and tests raw lookup performance with no
4508 # Get mode fills the cache and tests raw lookup performance with no
4493 # eviction.
4509 # eviction.
4494 getseq = []
4510 getseq = []
4495 for i in _xrange(gets):
4511 for i in _xrange(gets):
4496 getseq.append(random.choice(values))
4512 getseq.append(random.choice(values))
4497
4513
4498 def dogets():
4514 def dogets():
4499 d = util.lrucachedict(size)
4515 d = util.lrucachedict(size)
4500 for v in values:
4516 for v in values:
4501 d[v] = v
4517 d[v] = v
4502 for key in getseq:
4518 for key in getseq:
4503 value = d[key]
4519 value = d[key]
4504 value # silence pyflakes warning
4520 value # silence pyflakes warning
4505
4521
4506 def dogetscost():
4522 def dogetscost():
4507 d = util.lrucachedict(size, maxcost=costlimit)
4523 d = util.lrucachedict(size, maxcost=costlimit)
4508 for i, v in enumerate(values):
4524 for i, v in enumerate(values):
4509 d.insert(v, v, cost=costs[i])
4525 d.insert(v, v, cost=costs[i])
4510 for key in getseq:
4526 for key in getseq:
4511 try:
4527 try:
4512 value = d[key]
4528 value = d[key]
4513 value # silence pyflakes warning
4529 value # silence pyflakes warning
4514 except KeyError:
4530 except KeyError:
4515 pass
4531 pass
4516
4532
4517 # Set mode tests insertion speed with cache eviction.
4533 # Set mode tests insertion speed with cache eviction.
4518 setseq = []
4534 setseq = []
4519 costs = []
4535 costs = []
4520 for i in _xrange(sets):
4536 for i in _xrange(sets):
4521 setseq.append(random.randint(0, _maxint))
4537 setseq.append(random.randint(0, _maxint))
4522 costs.append(random.choice(costrange))
4538 costs.append(random.choice(costrange))
4523
4539
4524 def doinserts():
4540 def doinserts():
4525 d = util.lrucachedict(size)
4541 d = util.lrucachedict(size)
4526 for v in setseq:
4542 for v in setseq:
4527 d.insert(v, v)
4543 d.insert(v, v)
4528
4544
4529 def doinsertscost():
4545 def doinsertscost():
4530 d = util.lrucachedict(size, maxcost=costlimit)
4546 d = util.lrucachedict(size, maxcost=costlimit)
4531 for i, v in enumerate(setseq):
4547 for i, v in enumerate(setseq):
4532 d.insert(v, v, cost=costs[i])
4548 d.insert(v, v, cost=costs[i])
4533
4549
4534 def dosets():
4550 def dosets():
4535 d = util.lrucachedict(size)
4551 d = util.lrucachedict(size)
4536 for v in setseq:
4552 for v in setseq:
4537 d[v] = v
4553 d[v] = v
4538
4554
4539 # Mixed mode randomly performs gets and sets with eviction.
4555 # Mixed mode randomly performs gets and sets with eviction.
4540 mixedops = []
4556 mixedops = []
4541 for i in _xrange(mixed):
4557 for i in _xrange(mixed):
4542 r = random.randint(0, 100)
4558 r = random.randint(0, 100)
4543 if r < mixedgetfreq:
4559 if r < mixedgetfreq:
4544 op = 0
4560 op = 0
4545 else:
4561 else:
4546 op = 1
4562 op = 1
4547
4563
4548 mixedops.append(
4564 mixedops.append(
4549 (op, random.randint(0, size * 2), random.choice(costrange))
4565 (op, random.randint(0, size * 2), random.choice(costrange))
4550 )
4566 )
4551
4567
4552 def domixed():
4568 def domixed():
4553 d = util.lrucachedict(size)
4569 d = util.lrucachedict(size)
4554
4570
4555 for op, v, cost in mixedops:
4571 for op, v, cost in mixedops:
4556 if op == 0:
4572 if op == 0:
4557 try:
4573 try:
4558 d[v]
4574 d[v]
4559 except KeyError:
4575 except KeyError:
4560 pass
4576 pass
4561 else:
4577 else:
4562 d[v] = v
4578 d[v] = v
4563
4579
4564 def domixedcost():
4580 def domixedcost():
4565 d = util.lrucachedict(size, maxcost=costlimit)
4581 d = util.lrucachedict(size, maxcost=costlimit)
4566
4582
4567 for op, v, cost in mixedops:
4583 for op, v, cost in mixedops:
4568 if op == 0:
4584 if op == 0:
4569 try:
4585 try:
4570 d[v]
4586 d[v]
4571 except KeyError:
4587 except KeyError:
4572 pass
4588 pass
4573 else:
4589 else:
4574 d.insert(v, v, cost=cost)
4590 d.insert(v, v, cost=cost)
4575
4591
4576 benches = [
4592 benches = [
4577 (doinit, b'init'),
4593 (doinit, b'init'),
4578 ]
4594 ]
4579
4595
4580 if costlimit:
4596 if costlimit:
4581 benches.extend(
4597 benches.extend(
4582 [
4598 [
4583 (dogetscost, b'gets w/ cost limit'),
4599 (dogetscost, b'gets w/ cost limit'),
4584 (doinsertscost, b'inserts w/ cost limit'),
4600 (doinsertscost, b'inserts w/ cost limit'),
4585 (domixedcost, b'mixed w/ cost limit'),
4601 (domixedcost, b'mixed w/ cost limit'),
4586 ]
4602 ]
4587 )
4603 )
4588 else:
4604 else:
4589 benches.extend(
4605 benches.extend(
4590 [
4606 [
4591 (dogets, b'gets'),
4607 (dogets, b'gets'),
4592 (doinserts, b'inserts'),
4608 (doinserts, b'inserts'),
4593 (dosets, b'sets'),
4609 (dosets, b'sets'),
4594 (domixed, b'mixed'),
4610 (domixed, b'mixed'),
4595 ]
4611 ]
4596 )
4612 )
4597
4613
4598 for fn, title in benches:
4614 for fn, title in benches:
4599 timer, fm = gettimer(ui, opts)
4615 timer, fm = gettimer(ui, opts)
4600 timer(fn, title=title)
4616 timer(fn, title=title)
4601 fm.end()
4617 fm.end()
4602
4618
4603
4619
4604 @command(
4620 @command(
4605 b'perf::write|perfwrite',
4621 b'perf::write|perfwrite',
4606 formatteropts
4622 formatteropts
4607 + [
4623 + [
4608 (b'', b'write-method', b'write', b'ui write method'),
4624 (b'', b'write-method', b'write', b'ui write method'),
4609 (b'', b'nlines', 100, b'number of lines'),
4625 (b'', b'nlines', 100, b'number of lines'),
4610 (b'', b'nitems', 100, b'number of items (per line)'),
4626 (b'', b'nitems', 100, b'number of items (per line)'),
4611 (b'', b'item', b'x', b'item that is written'),
4627 (b'', b'item', b'x', b'item that is written'),
4612 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4628 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4613 (b'', b'flush-line', None, b'flush after each line'),
4629 (b'', b'flush-line', None, b'flush after each line'),
4614 ],
4630 ],
4615 )
4631 )
4616 def perfwrite(ui, repo, **opts):
4632 def perfwrite(ui, repo, **opts):
4617 """microbenchmark ui.write (and others)"""
4633 """microbenchmark ui.write (and others)"""
4618 opts = _byteskwargs(opts)
4634 opts = _byteskwargs(opts)
4619
4635
4620 write = getattr(ui, _sysstr(opts[b'write_method']))
4636 write = getattr(ui, _sysstr(opts[b'write_method']))
4621 nlines = int(opts[b'nlines'])
4637 nlines = int(opts[b'nlines'])
4622 nitems = int(opts[b'nitems'])
4638 nitems = int(opts[b'nitems'])
4623 item = opts[b'item']
4639 item = opts[b'item']
4624 batch_line = opts.get(b'batch_line')
4640 batch_line = opts.get(b'batch_line')
4625 flush_line = opts.get(b'flush_line')
4641 flush_line = opts.get(b'flush_line')
4626
4642
4627 if batch_line:
4643 if batch_line:
4628 line = item * nitems + b'\n'
4644 line = item * nitems + b'\n'
4629
4645
4630 def benchmark():
4646 def benchmark():
4631 for i in pycompat.xrange(nlines):
4647 for i in pycompat.xrange(nlines):
4632 if batch_line:
4648 if batch_line:
4633 write(line)
4649 write(line)
4634 else:
4650 else:
4635 for i in pycompat.xrange(nitems):
4651 for i in pycompat.xrange(nitems):
4636 write(item)
4652 write(item)
4637 write(b'\n')
4653 write(b'\n')
4638 if flush_line:
4654 if flush_line:
4639 ui.flush()
4655 ui.flush()
4640 ui.flush()
4656 ui.flush()
4641
4657
4642 timer, fm = gettimer(ui, opts)
4658 timer, fm = gettimer(ui, opts)
4643 timer(benchmark)
4659 timer(benchmark)
4644 fm.end()
4660 fm.end()
4645
4661
4646
4662
4647 def uisetup(ui):
4663 def uisetup(ui):
4648 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4664 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4649 commands, b'debugrevlogopts'
4665 commands, b'debugrevlogopts'
4650 ):
4666 ):
4651 # for "historical portability":
4667 # for "historical portability":
4652 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4668 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4653 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4669 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4654 # openrevlog() should cause failure, because it has been
4670 # openrevlog() should cause failure, because it has been
4655 # available since 3.5 (or 49c583ca48c4).
4671 # available since 3.5 (or 49c583ca48c4).
4656 def openrevlog(orig, repo, cmd, file_, opts):
4672 def openrevlog(orig, repo, cmd, file_, opts):
4657 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4673 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4658 raise error.Abort(
4674 raise error.Abort(
4659 b"This version doesn't support --dir option",
4675 b"This version doesn't support --dir option",
4660 hint=b"use 3.5 or later",
4676 hint=b"use 3.5 or later",
4661 )
4677 )
4662 return orig(repo, cmd, file_, opts)
4678 return orig(repo, cmd, file_, opts)
4663
4679
4664 name = _sysstr(b'openrevlog')
4680 name = _sysstr(b'openrevlog')
4665 extensions.wrapfunction(cmdutil, name, openrevlog)
4681 extensions.wrapfunction(cmdutil, name, openrevlog)
4666
4682
4667
4683
4668 @command(
4684 @command(
4669 b'perf::progress|perfprogress',
4685 b'perf::progress|perfprogress',
4670 formatteropts
4686 formatteropts
4671 + [
4687 + [
4672 (b'', b'topic', b'topic', b'topic for progress messages'),
4688 (b'', b'topic', b'topic', b'topic for progress messages'),
4673 (b'c', b'total', 1000000, b'total value we are progressing to'),
4689 (b'c', b'total', 1000000, b'total value we are progressing to'),
4674 ],
4690 ],
4675 norepo=True,
4691 norepo=True,
4676 )
4692 )
4677 def perfprogress(ui, topic=None, total=None, **opts):
4693 def perfprogress(ui, topic=None, total=None, **opts):
4678 """printing of progress bars"""
4694 """printing of progress bars"""
4679 opts = _byteskwargs(opts)
4695 opts = _byteskwargs(opts)
4680
4696
4681 timer, fm = gettimer(ui, opts)
4697 timer, fm = gettimer(ui, opts)
4682
4698
4683 def doprogress():
4699 def doprogress():
4684 with ui.makeprogress(topic, total=total) as progress:
4700 with ui.makeprogress(topic, total=total) as progress:
4685 for i in _xrange(total):
4701 for i in _xrange(total):
4686 progress.increment()
4702 progress.increment()
4687
4703
4688 timer(doprogress)
4704 timer(doprogress)
4689 fm.end()
4705 fm.end()
@@ -1,484 +1,487
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (by default, the first
63 benchmarked)
63 iteration is benchmarked)
64
65 "profiled-runs"
66 list of iteration to profile (starting from 0)
64
67
65 "run-limits"
68 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
69 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
70 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
71 conditions are considered in order with the following logic:
69
72
70 If benchmark has been running for <time> seconds, and we have performed
73 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
74 <numberofrun> iterations, stop the benchmark,
72
75
73 The default value is: '3.0-100, 10.0-3'
76 The default value is: '3.0-100, 10.0-3'
74
77
75 "stub"
78 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
79 When set, benchmarks will only be run once, useful for testing (default:
77 off)
80 off)
78
81
79 list of commands:
82 list of commands:
80
83
81 perf::addremove
84 perf::addremove
82 (no help text available)
85 (no help text available)
83 perf::ancestors
86 perf::ancestors
84 (no help text available)
87 (no help text available)
85 perf::ancestorset
88 perf::ancestorset
86 (no help text available)
89 (no help text available)
87 perf::annotate
90 perf::annotate
88 (no help text available)
91 (no help text available)
89 perf::bdiff benchmark a bdiff between revisions
92 perf::bdiff benchmark a bdiff between revisions
90 perf::bookmarks
93 perf::bookmarks
91 benchmark parsing bookmarks from disk to memory
94 benchmark parsing bookmarks from disk to memory
92 perf::branchmap
95 perf::branchmap
93 benchmark the update of a branchmap
96 benchmark the update of a branchmap
94 perf::branchmapload
97 perf::branchmapload
95 benchmark reading the branchmap
98 benchmark reading the branchmap
96 perf::branchmapupdate
99 perf::branchmapupdate
97 benchmark branchmap update from for <base> revs to <target>
100 benchmark branchmap update from for <base> revs to <target>
98 revs
101 revs
99 perf::bundle benchmark the creation of a bundle from a repository
102 perf::bundle benchmark the creation of a bundle from a repository
100 perf::bundleread
103 perf::bundleread
101 Benchmark reading of bundle files.
104 Benchmark reading of bundle files.
102 perf::cca (no help text available)
105 perf::cca (no help text available)
103 perf::changegroupchangelog
106 perf::changegroupchangelog
104 Benchmark producing a changelog group for a changegroup.
107 Benchmark producing a changelog group for a changegroup.
105 perf::changeset
108 perf::changeset
106 (no help text available)
109 (no help text available)
107 perf::ctxfiles
110 perf::ctxfiles
108 (no help text available)
111 (no help text available)
109 perf::delta-find
112 perf::delta-find
110 benchmark the process of finding a valid delta for a revlog
113 benchmark the process of finding a valid delta for a revlog
111 revision
114 revision
112 perf::diffwd Profile diff of working directory changes
115 perf::diffwd Profile diff of working directory changes
113 perf::dirfoldmap
116 perf::dirfoldmap
114 benchmap a 'dirstate._map.dirfoldmap.get()' request
117 benchmap a 'dirstate._map.dirfoldmap.get()' request
115 perf::dirs (no help text available)
118 perf::dirs (no help text available)
116 perf::dirstate
119 perf::dirstate
117 benchmap the time of various distate operations
120 benchmap the time of various distate operations
118 perf::dirstatedirs
121 perf::dirstatedirs
119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
122 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
120 perf::dirstatefoldmap
123 perf::dirstatefoldmap
121 benchmap a 'dirstate._map.filefoldmap.get()' request
124 benchmap a 'dirstate._map.filefoldmap.get()' request
122 perf::dirstatewrite
125 perf::dirstatewrite
123 benchmap the time it take to write a dirstate on disk
126 benchmap the time it take to write a dirstate on disk
124 perf::discovery
127 perf::discovery
125 benchmark discovery between local repo and the peer at given
128 benchmark discovery between local repo and the peer at given
126 path
129 path
127 perf::fncacheencode
130 perf::fncacheencode
128 (no help text available)
131 (no help text available)
129 perf::fncacheload
132 perf::fncacheload
130 (no help text available)
133 (no help text available)
131 perf::fncachewrite
134 perf::fncachewrite
132 (no help text available)
135 (no help text available)
133 perf::heads benchmark the computation of a changelog heads
136 perf::heads benchmark the computation of a changelog heads
134 perf::helper-mergecopies
137 perf::helper-mergecopies
135 find statistics about potential parameters for
138 find statistics about potential parameters for
136 'perfmergecopies'
139 'perfmergecopies'
137 perf::helper-pathcopies
140 perf::helper-pathcopies
138 find statistic about potential parameters for the
141 find statistic about potential parameters for the
139 'perftracecopies'
142 'perftracecopies'
140 perf::ignore benchmark operation related to computing ignore
143 perf::ignore benchmark operation related to computing ignore
141 perf::index benchmark index creation time followed by a lookup
144 perf::index benchmark index creation time followed by a lookup
142 perf::linelogedits
145 perf::linelogedits
143 (no help text available)
146 (no help text available)
144 perf::loadmarkers
147 perf::loadmarkers
145 benchmark the time to parse the on-disk markers for a repo
148 benchmark the time to parse the on-disk markers for a repo
146 perf::log (no help text available)
149 perf::log (no help text available)
147 perf::lookup (no help text available)
150 perf::lookup (no help text available)
148 perf::lrucachedict
151 perf::lrucachedict
149 (no help text available)
152 (no help text available)
150 perf::manifest
153 perf::manifest
151 benchmark the time to read a manifest from disk and return a
154 benchmark the time to read a manifest from disk and return a
152 usable
155 usable
153 perf::mergecalculate
156 perf::mergecalculate
154 (no help text available)
157 (no help text available)
155 perf::mergecopies
158 perf::mergecopies
156 measure runtime of 'copies.mergecopies'
159 measure runtime of 'copies.mergecopies'
157 perf::moonwalk
160 perf::moonwalk
158 benchmark walking the changelog backwards
161 benchmark walking the changelog backwards
159 perf::nodelookup
162 perf::nodelookup
160 (no help text available)
163 (no help text available)
161 perf::nodemap
164 perf::nodemap
162 benchmark the time necessary to look up revision from a cold
165 benchmark the time necessary to look up revision from a cold
163 nodemap
166 nodemap
164 perf::parents
167 perf::parents
165 benchmark the time necessary to fetch one changeset's parents.
168 benchmark the time necessary to fetch one changeset's parents.
166 perf::pathcopies
169 perf::pathcopies
167 benchmark the copy tracing logic
170 benchmark the copy tracing logic
168 perf::phases benchmark phasesets computation
171 perf::phases benchmark phasesets computation
169 perf::phasesremote
172 perf::phasesremote
170 benchmark time needed to analyse phases of the remote server
173 benchmark time needed to analyse phases of the remote server
171 perf::progress
174 perf::progress
172 printing of progress bars
175 printing of progress bars
173 perf::rawfiles
176 perf::rawfiles
174 (no help text available)
177 (no help text available)
175 perf::revlogchunks
178 perf::revlogchunks
176 Benchmark operations on revlog chunks.
179 Benchmark operations on revlog chunks.
177 perf::revlogindex
180 perf::revlogindex
178 Benchmark operations against a revlog index.
181 Benchmark operations against a revlog index.
179 perf::revlogrevision
182 perf::revlogrevision
180 Benchmark obtaining a revlog revision.
183 Benchmark obtaining a revlog revision.
181 perf::revlogrevisions
184 perf::revlogrevisions
182 Benchmark reading a series of revisions from a revlog.
185 Benchmark reading a series of revisions from a revlog.
183 perf::revlogwrite
186 perf::revlogwrite
184 Benchmark writing a series of revisions to a revlog.
187 Benchmark writing a series of revisions to a revlog.
185 perf::revrange
188 perf::revrange
186 (no help text available)
189 (no help text available)
187 perf::revset benchmark the execution time of a revset
190 perf::revset benchmark the execution time of a revset
188 perf::startup
191 perf::startup
189 (no help text available)
192 (no help text available)
190 perf::status benchmark the performance of a single status call
193 perf::status benchmark the performance of a single status call
191 perf::stream-consume
194 perf::stream-consume
192 benchmark the full application of a stream clone
195 benchmark the full application of a stream clone
193 perf::stream-generate
196 perf::stream-generate
194 benchmark the full generation of a stream clone
197 benchmark the full generation of a stream clone
195 perf::stream-locked-section
198 perf::stream-locked-section
196 benchmark the initial, repo-locked, section of a stream-clone
199 benchmark the initial, repo-locked, section of a stream-clone
197 perf::tags Benchmark tags retrieval in various situation
200 perf::tags Benchmark tags retrieval in various situation
198 perf::templating
201 perf::templating
199 test the rendering time of a given template
202 test the rendering time of a given template
200 perf::unbundle
203 perf::unbundle
201 benchmark application of a bundle in a repository.
204 benchmark application of a bundle in a repository.
202 perf::unidiff
205 perf::unidiff
203 benchmark a unified diff between revisions
206 benchmark a unified diff between revisions
204 perf::volatilesets
207 perf::volatilesets
205 benchmark the computation of various volatile set
208 benchmark the computation of various volatile set
206 perf::walk (no help text available)
209 perf::walk (no help text available)
207 perf::write microbenchmark ui.write (and others)
210 perf::write microbenchmark ui.write (and others)
208
211
209 (use 'hg help -v perf' to show built-in aliases and global options)
212 (use 'hg help -v perf' to show built-in aliases and global options)
210
213
211 $ hg help perfaddremove
214 $ hg help perfaddremove
212 hg perf::addremove
215 hg perf::addremove
213
216
214 aliases: perfaddremove
217 aliases: perfaddremove
215
218
216 (no help text available)
219 (no help text available)
217
220
218 options:
221 options:
219
222
220 -T --template TEMPLATE display with template
223 -T --template TEMPLATE display with template
221
224
222 (some details hidden, use --verbose to show complete help)
225 (some details hidden, use --verbose to show complete help)
223
226
224 $ hg perfaddremove
227 $ hg perfaddremove
225 $ hg perfancestors
228 $ hg perfancestors
226 $ hg perfancestorset 2
229 $ hg perfancestorset 2
227 $ hg perfannotate a
230 $ hg perfannotate a
228 $ hg perfbdiff -c 1
231 $ hg perfbdiff -c 1
229 $ hg perfbdiff --alldata 1
232 $ hg perfbdiff --alldata 1
230 $ hg perfunidiff -c 1
233 $ hg perfunidiff -c 1
231 $ hg perfunidiff --alldata 1
234 $ hg perfunidiff --alldata 1
232 $ hg perfbookmarks
235 $ hg perfbookmarks
233 $ hg perfbranchmap
236 $ hg perfbranchmap
234 $ hg perfbranchmapload
237 $ hg perfbranchmapload
235 $ hg perfbranchmapupdate --base "not tip" --target "tip"
238 $ hg perfbranchmapupdate --base "not tip" --target "tip"
236 benchmark of branchmap with 3 revisions with 1 new ones
239 benchmark of branchmap with 3 revisions with 1 new ones
237 $ hg perfcca
240 $ hg perfcca
238 $ hg perfchangegroupchangelog
241 $ hg perfchangegroupchangelog
239 $ hg perfchangegroupchangelog --cgversion 01
242 $ hg perfchangegroupchangelog --cgversion 01
240 $ hg perfchangeset 2
243 $ hg perfchangeset 2
241 $ hg perfctxfiles 2
244 $ hg perfctxfiles 2
242 $ hg perfdiffwd
245 $ hg perfdiffwd
243 $ hg perfdirfoldmap
246 $ hg perfdirfoldmap
244 $ hg perfdirs
247 $ hg perfdirs
245 $ hg perfdirstate
248 $ hg perfdirstate
246 $ hg perfdirstate --contains
249 $ hg perfdirstate --contains
247 $ hg perfdirstate --iteration
250 $ hg perfdirstate --iteration
248 $ hg perfdirstatedirs
251 $ hg perfdirstatedirs
249 $ hg perfdirstatefoldmap
252 $ hg perfdirstatefoldmap
250 $ hg perfdirstatewrite
253 $ hg perfdirstatewrite
251 #if repofncache
254 #if repofncache
252 $ hg perffncacheencode
255 $ hg perffncacheencode
253 $ hg perffncacheload
256 $ hg perffncacheload
254 $ hg debugrebuildfncache
257 $ hg debugrebuildfncache
255 fncache already up to date
258 fncache already up to date
256 $ hg perffncachewrite
259 $ hg perffncachewrite
257 $ hg debugrebuildfncache
260 $ hg debugrebuildfncache
258 fncache already up to date
261 fncache already up to date
259 #endif
262 #endif
260 $ hg perfheads
263 $ hg perfheads
261 $ hg perfignore
264 $ hg perfignore
262 $ hg perfindex
265 $ hg perfindex
263 $ hg perflinelogedits -n 1
266 $ hg perflinelogedits -n 1
264 $ hg perfloadmarkers
267 $ hg perfloadmarkers
265 $ hg perflog
268 $ hg perflog
266 $ hg perflookup 2
269 $ hg perflookup 2
267 $ hg perflrucache
270 $ hg perflrucache
268 $ hg perfmanifest 2
271 $ hg perfmanifest 2
269 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
272 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
270 $ hg perfmanifest -m 44fe2c8352bb
273 $ hg perfmanifest -m 44fe2c8352bb
271 abort: manifest revision must be integer or full node
274 abort: manifest revision must be integer or full node
272 [255]
275 [255]
273 $ hg perfmergecalculate -r 3
276 $ hg perfmergecalculate -r 3
274 $ hg perfmoonwalk
277 $ hg perfmoonwalk
275 $ hg perfnodelookup 2
278 $ hg perfnodelookup 2
276 $ hg perfpathcopies 1 2
279 $ hg perfpathcopies 1 2
277 $ hg perfprogress --total 1000
280 $ hg perfprogress --total 1000
278 $ hg perfrawfiles 2
281 $ hg perfrawfiles 2
279 $ hg perfrevlogindex -c
282 $ hg perfrevlogindex -c
280 #if reporevlogstore
283 #if reporevlogstore
281 $ hg perfrevlogrevisions .hg/store/data/a.i
284 $ hg perfrevlogrevisions .hg/store/data/a.i
282 #endif
285 #endif
283 $ hg perfrevlogrevision -m 0
286 $ hg perfrevlogrevision -m 0
284 $ hg perfrevlogchunks -c
287 $ hg perfrevlogchunks -c
285 $ hg perfrevrange
288 $ hg perfrevrange
286 $ hg perfrevset 'all()'
289 $ hg perfrevset 'all()'
287 $ hg perfstartup
290 $ hg perfstartup
288 $ hg perfstatus
291 $ hg perfstatus
289 $ hg perfstatus --dirstate
292 $ hg perfstatus --dirstate
290 $ hg perftags
293 $ hg perftags
291 $ hg perftemplating
294 $ hg perftemplating
292 $ hg perfvolatilesets
295 $ hg perfvolatilesets
293 $ hg perfwalk
296 $ hg perfwalk
294 $ hg perfparents
297 $ hg perfparents
295 $ hg perfdiscovery -q .
298 $ hg perfdiscovery -q .
296 $ hg perf::phases
299 $ hg perf::phases
297
300
298 Test run control
301 Test run control
299 ----------------
302 ----------------
300
303
301 Simple single entry
304 Simple single entry
302
305
303 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
306 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
304 ! wall * comb * user * sys * (best of 15) (glob)
307 ! wall * comb * user * sys * (best of 15) (glob)
305 ! wall * comb * user * sys * (max of 15) (glob)
308 ! wall * comb * user * sys * (max of 15) (glob)
306 ! wall * comb * user * sys * (avg of 15) (glob)
309 ! wall * comb * user * sys * (avg of 15) (glob)
307 ! wall * comb * user * sys * (median of 15) (glob)
310 ! wall * comb * user * sys * (median of 15) (glob)
308
311
309 Multiple entries
312 Multiple entries
310
313
311 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
314 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
312 ! wall * comb * user * sys * (best of 50) (glob)
315 ! wall * comb * user * sys * (best of 50) (glob)
313 ! wall * comb * user * sys * (max of 50) (glob)
316 ! wall * comb * user * sys * (max of 50) (glob)
314 ! wall * comb * user * sys * (avg of 50) (glob)
317 ! wall * comb * user * sys * (avg of 50) (glob)
315 ! wall * comb * user * sys * (median of 50) (glob)
318 ! wall * comb * user * sys * (median of 50) (glob)
316
319
317 error case are ignored
320 error case are ignored
318
321
319 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
322 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
320 malformatted run limit entry, missing "-": 500
323 malformatted run limit entry, missing "-": 500
321 ! wall * comb * user * sys * (best of 50) (glob)
324 ! wall * comb * user * sys * (best of 50) (glob)
322 ! wall * comb * user * sys * (max of 50) (glob)
325 ! wall * comb * user * sys * (max of 50) (glob)
323 ! wall * comb * user * sys * (avg of 50) (glob)
326 ! wall * comb * user * sys * (avg of 50) (glob)
324 ! wall * comb * user * sys * (median of 50) (glob)
327 ! wall * comb * user * sys * (median of 50) (glob)
325 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
328 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
326 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
329 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
327 ! wall * comb * user * sys * (best of 50) (glob)
330 ! wall * comb * user * sys * (best of 50) (glob)
328 ! wall * comb * user * sys * (max of 50) (glob)
331 ! wall * comb * user * sys * (max of 50) (glob)
329 ! wall * comb * user * sys * (avg of 50) (glob)
332 ! wall * comb * user * sys * (avg of 50) (glob)
330 ! wall * comb * user * sys * (median of 50) (glob)
333 ! wall * comb * user * sys * (median of 50) (glob)
331 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
334 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
332 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
335 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
333 ! wall * comb * user * sys * (best of 50) (glob)
336 ! wall * comb * user * sys * (best of 50) (glob)
334 ! wall * comb * user * sys * (max of 50) (glob)
337 ! wall * comb * user * sys * (max of 50) (glob)
335 ! wall * comb * user * sys * (avg of 50) (glob)
338 ! wall * comb * user * sys * (avg of 50) (glob)
336 ! wall * comb * user * sys * (median of 50) (glob)
339 ! wall * comb * user * sys * (median of 50) (glob)
337
340
338 test actual output
341 test actual output
339 ------------------
342 ------------------
340
343
341 normal output:
344 normal output:
342
345
343 $ hg perfheads --config perf.stub=no
346 $ hg perfheads --config perf.stub=no
344 ! wall * comb * user * sys * (best of *) (glob)
347 ! wall * comb * user * sys * (best of *) (glob)
345 ! wall * comb * user * sys * (max of *) (glob)
348 ! wall * comb * user * sys * (max of *) (glob)
346 ! wall * comb * user * sys * (avg of *) (glob)
349 ! wall * comb * user * sys * (avg of *) (glob)
347 ! wall * comb * user * sys * (median of *) (glob)
350 ! wall * comb * user * sys * (median of *) (glob)
348
351
349 detailed output:
352 detailed output:
350
353
351 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
354 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
352 ! wall * comb * user * sys * (best of *) (glob)
355 ! wall * comb * user * sys * (best of *) (glob)
353 ! wall * comb * user * sys * (max of *) (glob)
356 ! wall * comb * user * sys * (max of *) (glob)
354 ! wall * comb * user * sys * (avg of *) (glob)
357 ! wall * comb * user * sys * (avg of *) (glob)
355 ! wall * comb * user * sys * (median of *) (glob)
358 ! wall * comb * user * sys * (median of *) (glob)
356
359
357 test json output
360 test json output
358 ----------------
361 ----------------
359
362
360 normal output:
363 normal output:
361
364
362 $ hg perfheads --template json --config perf.stub=no
365 $ hg perfheads --template json --config perf.stub=no
363 [
366 [
364 {
367 {
365 "avg.comb": *, (glob)
368 "avg.comb": *, (glob)
366 "avg.count": *, (glob)
369 "avg.count": *, (glob)
367 "avg.sys": *, (glob)
370 "avg.sys": *, (glob)
368 "avg.user": *, (glob)
371 "avg.user": *, (glob)
369 "avg.wall": *, (glob)
372 "avg.wall": *, (glob)
370 "comb": *, (glob)
373 "comb": *, (glob)
371 "count": *, (glob)
374 "count": *, (glob)
372 "max.comb": *, (glob)
375 "max.comb": *, (glob)
373 "max.count": *, (glob)
376 "max.count": *, (glob)
374 "max.sys": *, (glob)
377 "max.sys": *, (glob)
375 "max.user": *, (glob)
378 "max.user": *, (glob)
376 "max.wall": *, (glob)
379 "max.wall": *, (glob)
377 "median.comb": *, (glob)
380 "median.comb": *, (glob)
378 "median.count": *, (glob)
381 "median.count": *, (glob)
379 "median.sys": *, (glob)
382 "median.sys": *, (glob)
380 "median.user": *, (glob)
383 "median.user": *, (glob)
381 "median.wall": *, (glob)
384 "median.wall": *, (glob)
382 "sys": *, (glob)
385 "sys": *, (glob)
383 "user": *, (glob)
386 "user": *, (glob)
384 "wall": * (glob)
387 "wall": * (glob)
385 }
388 }
386 ]
389 ]
387
390
388 detailed output:
391 detailed output:
389
392
390 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
393 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
391 [
394 [
392 {
395 {
393 "avg.comb": *, (glob)
396 "avg.comb": *, (glob)
394 "avg.count": *, (glob)
397 "avg.count": *, (glob)
395 "avg.sys": *, (glob)
398 "avg.sys": *, (glob)
396 "avg.user": *, (glob)
399 "avg.user": *, (glob)
397 "avg.wall": *, (glob)
400 "avg.wall": *, (glob)
398 "comb": *, (glob)
401 "comb": *, (glob)
399 "count": *, (glob)
402 "count": *, (glob)
400 "max.comb": *, (glob)
403 "max.comb": *, (glob)
401 "max.count": *, (glob)
404 "max.count": *, (glob)
402 "max.sys": *, (glob)
405 "max.sys": *, (glob)
403 "max.user": *, (glob)
406 "max.user": *, (glob)
404 "max.wall": *, (glob)
407 "max.wall": *, (glob)
405 "median.comb": *, (glob)
408 "median.comb": *, (glob)
406 "median.count": *, (glob)
409 "median.count": *, (glob)
407 "median.sys": *, (glob)
410 "median.sys": *, (glob)
408 "median.user": *, (glob)
411 "median.user": *, (glob)
409 "median.wall": *, (glob)
412 "median.wall": *, (glob)
410 "sys": *, (glob)
413 "sys": *, (glob)
411 "user": *, (glob)
414 "user": *, (glob)
412 "wall": * (glob)
415 "wall": * (glob)
413 }
416 }
414 ]
417 ]
415
418
416 Test pre-run feature
419 Test pre-run feature
417 --------------------
420 --------------------
418
421
419 (perf discovery has some spurious output)
422 (perf discovery has some spurious output)
420
423
421 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
424 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
422 ! wall * comb * user * sys * (best of 1) (glob)
425 ! wall * comb * user * sys * (best of 1) (glob)
423 ! wall * comb * user * sys * (max of 1) (glob)
426 ! wall * comb * user * sys * (max of 1) (glob)
424 ! wall * comb * user * sys * (avg of 1) (glob)
427 ! wall * comb * user * sys * (avg of 1) (glob)
425 ! wall * comb * user * sys * (median of 1) (glob)
428 ! wall * comb * user * sys * (median of 1) (glob)
426 searching for changes
429 searching for changes
427 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
430 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
428 ! wall * comb * user * sys * (best of 1) (glob)
431 ! wall * comb * user * sys * (best of 1) (glob)
429 ! wall * comb * user * sys * (max of 1) (glob)
432 ! wall * comb * user * sys * (max of 1) (glob)
430 ! wall * comb * user * sys * (avg of 1) (glob)
433 ! wall * comb * user * sys * (avg of 1) (glob)
431 ! wall * comb * user * sys * (median of 1) (glob)
434 ! wall * comb * user * sys * (median of 1) (glob)
432 searching for changes
435 searching for changes
433 searching for changes
436 searching for changes
434 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
437 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
435 ! wall * comb * user * sys * (best of 1) (glob)
438 ! wall * comb * user * sys * (best of 1) (glob)
436 ! wall * comb * user * sys * (max of 1) (glob)
439 ! wall * comb * user * sys * (max of 1) (glob)
437 ! wall * comb * user * sys * (avg of 1) (glob)
440 ! wall * comb * user * sys * (avg of 1) (glob)
438 ! wall * comb * user * sys * (median of 1) (glob)
441 ! wall * comb * user * sys * (median of 1) (glob)
439 searching for changes
442 searching for changes
440 searching for changes
443 searching for changes
441 searching for changes
444 searching for changes
442 searching for changes
445 searching for changes
443 $ hg perf::bundle 'last(all(), 5)'
446 $ hg perf::bundle 'last(all(), 5)'
444 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
447 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
445 4 changesets found
448 4 changesets found
446 $ hg perf::unbundle last-5.hg
449 $ hg perf::unbundle last-5.hg
447
450
448
451
449 test profile-benchmark option
452 test profile-benchmark option
450 ------------------------------
453 ------------------------------
451
454
452 Function to check that statprof ran
455 Function to check that statprof ran
453 $ statprofran () {
456 $ statprofran () {
454 > grep -E 'Sample count:|No samples recorded' > /dev/null
457 > grep -E 'Sample count:|No samples recorded' > /dev/null
455 > }
458 > }
456 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
459 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
457
460
458 Check perf.py for historical portability
461 Check perf.py for historical portability
459 ----------------------------------------
462 ----------------------------------------
460
463
461 $ cd "$TESTDIR/.."
464 $ cd "$TESTDIR/.."
462
465
463 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
466 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
464 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
467 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
465 > "$TESTDIR"/check-perf-code.py contrib/perf.py
468 > "$TESTDIR"/check-perf-code.py contrib/perf.py
466 contrib/perf.py:\d+: (re)
469 contrib/perf.py:\d+: (re)
467 > from mercurial import (
470 > from mercurial import (
468 import newer module separately in try clause for early Mercurial
471 import newer module separately in try clause for early Mercurial
469 contrib/perf.py:\d+: (re)
472 contrib/perf.py:\d+: (re)
470 > from mercurial import (
473 > from mercurial import (
471 import newer module separately in try clause for early Mercurial
474 import newer module separately in try clause for early Mercurial
472 contrib/perf.py:\d+: (re)
475 contrib/perf.py:\d+: (re)
473 > origindexpath = orig.opener.join(indexfile)
476 > origindexpath = orig.opener.join(indexfile)
474 use getvfs()/getsvfs() for early Mercurial
477 use getvfs()/getsvfs() for early Mercurial
475 contrib/perf.py:\d+: (re)
478 contrib/perf.py:\d+: (re)
476 > origdatapath = orig.opener.join(datafile)
479 > origdatapath = orig.opener.join(datafile)
477 use getvfs()/getsvfs() for early Mercurial
480 use getvfs()/getsvfs() for early Mercurial
478 contrib/perf.py:\d+: (re)
481 contrib/perf.py:\d+: (re)
479 > vfs = vfsmod.vfs(tmpdir)
482 > vfs = vfsmod.vfs(tmpdir)
480 use getvfs()/getsvfs() for early Mercurial
483 use getvfs()/getsvfs() for early Mercurial
481 contrib/perf.py:\d+: (re)
484 contrib/perf.py:\d+: (re)
482 > vfs.options = getattr(orig.opener, 'options', None)
485 > vfs.options = getattr(orig.opener, 'options', None)
483 use getvfs()/getsvfs() for early Mercurial
486 use getvfs()/getsvfs() for early Mercurial
484 [1]
487 [1]
General Comments 0
You need to be logged in to leave comments. Login now