perf: add a perf::stream-generate command...
marmoute
r51570:b8de54ac default
@@ -1,4337 +1,4369 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

  If the benchmark has been running for <time> seconds and we have performed
  <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
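
# Example configuration using the options documented above (the values are
# illustrative only):
#
#   [perf]
#   all-timing = yes
#   pre-run = 2
#   run-limits = 5.0-50, 30.0-5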

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions as
#   possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

import mercurial.revlog
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )


def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of the formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
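
# Typical use of the (timer, formatter) pair returned by gettimer(), as seen
# in the perf commands below:
#
#   timer, fm = gettimer(ui, opts)
#   timer(d, setup=s)  # benchmark `d`, calling `s` before each timed run
#   fm.end()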


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))


# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
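# With the defaults above, a benchmark stops as soon as it has been running
# for at least 3.0 seconds and completed 100 iterations, or for at least
# 10.0 seconds and completed 3 iterations (see the loop in _timer below).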


@contextlib.contextmanager
def noop_context():
    yield


def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
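
# With default settings formatone() emits a single line per benchmark, e.g.
# (illustrative numbers):
#
#   ! wall 0.001234 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
#
# and with perf.all-timing enabled it also emits `max`, `avg` and `median`
# lines in the same format.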


# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking a future removal of the attribute,
    which would break the assumptions of the performance measurement.

    This function returns an object that can (1) assign a new value to
    the attribute and (2) restore the original value of the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to examine
    an attribute which isn't guaranteed to exist in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
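
# Typical use of the object returned by safeattrsetter(), as in gettimer()
# above:
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)
#   # ... later, uifout.restore() puts the original value back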


# utilities to examine internal API changes


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't the
        # correct way to clear the tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands


@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of tracked files is requested. If
    `--unknown` is passed, "unknown" files are also considered.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
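
# Example invocations (any repository will do):
#
#   $ hg perf::status
#   $ hg perf::status --unknown
#   $ hg perf::status --dirstate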


@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()


@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()


@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()


@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
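
# Example invocation; the revision number is illustrative and `-m` selects
# the manifest revlog (see the revlog options above):
#
#   $ hg perf::delta-find -m 4321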
1004
1004
1005
1005
1006 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1006 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1007 def perfdiscovery(ui, repo, path, **opts):
1007 def perfdiscovery(ui, repo, path, **opts):
1008 """benchmark discovery between local repo and the peer at given path"""
1008 """benchmark discovery between local repo and the peer at given path"""
1009 repos = [repo, None]
1009 repos = [repo, None]
1010 timer, fm = gettimer(ui, opts)
1010 timer, fm = gettimer(ui, opts)
1011
1011
1012 try:
1012 try:
1013 from mercurial.utils.urlutil import get_unique_pull_path_obj
1013 from mercurial.utils.urlutil import get_unique_pull_path_obj
1014
1014
1015 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1015 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1016 except ImportError:
1016 except ImportError:
1017 try:
1017 try:
1018 from mercurial.utils.urlutil import get_unique_pull_path
1018 from mercurial.utils.urlutil import get_unique_pull_path
1019
1019
1020 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1020 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1021 except ImportError:
1021 except ImportError:
1022 path = ui.expandpath(path)
1022 path = ui.expandpath(path)
1023
1023
1024 def s():
1024 def s():
1025 repos[1] = hg.peer(ui, opts, path)
1025 repos[1] = hg.peer(ui, opts, path)
1026
1026
1027 def d():
1027 def d():
1028 setdiscovery.findcommonheads(ui, *repos)
1028 setdiscovery.findcommonheads(ui, *repos)
1029
1029
1030 timer(d, setup=s)
1030 timer(d, setup=s)
1031 fm.end()
1031 fm.end()
1032
1032
1033
1033
1034 @command(
1034 @command(
1035 b'perf::bookmarks|perfbookmarks',
1035 b'perf::bookmarks|perfbookmarks',
1036 formatteropts
1036 formatteropts
1037 + [
1037 + [
1038 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1038 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1039 ],
1039 ],
1040 )
1040 )
1041 def perfbookmarks(ui, repo, **opts):
1041 def perfbookmarks(ui, repo, **opts):
1042 """benchmark parsing bookmarks from disk to memory"""
1042 """benchmark parsing bookmarks from disk to memory"""
1043 opts = _byteskwargs(opts)
1043 opts = _byteskwargs(opts)
1044 timer, fm = gettimer(ui, opts)
1044 timer, fm = gettimer(ui, opts)
1045
1045
1046 clearrevlogs = opts[b'clear_revlogs']
1046 clearrevlogs = opts[b'clear_revlogs']
1047
1047
1048 def s():
1048 def s():
1049 if clearrevlogs:
1049 if clearrevlogs:
1050 clearchangelog(repo)
1050 clearchangelog(repo)
1051 clearfilecache(repo, b'_bookmarks')
1051 clearfilecache(repo, b'_bookmarks')
1052
1052
1053 def d():
1053 def d():
1054 repo._bookmarks
1054 repo._bookmarks
1055
1055
1056 timer(d, setup=s)
1056 timer(d, setup=s)
1057 fm.end()
1057 fm.end()
1058
1058
1059
1059
1060 @command(
1060 @command(
1061 b'perf::bundle',
1061 b'perf::bundle',
1062 [
1062 [
1063 (
1063 (
1064 b'r',
1064 b'r',
1065 b'rev',
1065 b'rev',
1066 [],
1066 [],
1067 b'changesets to bundle',
1067 b'changesets to bundle',
1068 b'REV',
1068 b'REV',
1069 ),
1069 ),
1070 (
1070 (
1071 b't',
1071 b't',
1072 b'type',
1072 b'type',
1073 b'none',
1073 b'none',
1074 b'bundlespec to use (see `hg help bundlespec`)',
1074 b'bundlespec to use (see `hg help bundlespec`)',
1075 b'TYPE',
1075 b'TYPE',
1076 ),
1076 ),
1077 ]
1077 ]
1078 + formatteropts,
1078 + formatteropts,
1079 b'REVS',
1079 b'REVS',
1080 )
1080 )
1081 def perfbundle(ui, repo, *revs, **opts):
1081 def perfbundle(ui, repo, *revs, **opts):
1082 """benchmark the creation of a bundle from a repository
1082 """benchmark the creation of a bundle from a repository
1083
1083
1084 For now, this only supports "none" compression.
1084 For now, this only supports "none" compression.
1085 """
1085 """
1086 try:
1086 try:
1087 from mercurial import bundlecaches
1087 from mercurial import bundlecaches
1088
1088
1089 parsebundlespec = bundlecaches.parsebundlespec
1089 parsebundlespec = bundlecaches.parsebundlespec
1090 except ImportError:
1090 except ImportError:
1091 from mercurial import exchange
1091 from mercurial import exchange
1092
1092
1093 parsebundlespec = exchange.parsebundlespec
1093 parsebundlespec = exchange.parsebundlespec
1094
1094
1095 from mercurial import discovery
1095 from mercurial import discovery
1096 from mercurial import bundle2
1096 from mercurial import bundle2
1097
1097
1098 opts = _byteskwargs(opts)
1098 opts = _byteskwargs(opts)
1099 timer, fm = gettimer(ui, opts)
1099 timer, fm = gettimer(ui, opts)
1100
1100
1101 cl = repo.changelog
1101 cl = repo.changelog
1102 revs = list(revs)
1102 revs = list(revs)
1103 revs.extend(opts.get(b'rev', ()))
1103 revs.extend(opts.get(b'rev', ()))
1104 revs = scmutil.revrange(repo, revs)
1104 revs = scmutil.revrange(repo, revs)
1105 if not revs:
1105 if not revs:
1106 raise error.Abort(b"not revision specified")
1106 raise error.Abort(b"not revision specified")
1107 # make it a consistent set (ie: without topological gaps)
1107 # make it a consistent set (ie: without topological gaps)
1108 old_len = len(revs)
1108 old_len = len(revs)
1109 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1109 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1110 if old_len != len(revs):
1110 if old_len != len(revs):
1111 new_count = len(revs) - old_len
1111 new_count = len(revs) - old_len
1112 msg = b"add %d new revisions to make it a consistent set\n"
1112 msg = b"add %d new revisions to make it a consistent set\n"
1113 ui.write_err(msg % new_count)
1113 ui.write_err(msg % new_count)
1114
1114
1115 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1115 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1116 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1116 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1117 outgoing = discovery.outgoing(repo, bases, targets)
1117 outgoing = discovery.outgoing(repo, bases, targets)
1118
1118
1119 bundle_spec = opts.get(b'type')
1119 bundle_spec = opts.get(b'type')
1120
1120
1121 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1121 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1122
1122
1123 cgversion = bundle_spec.params.get(b"cg.version")
1123 cgversion = bundle_spec.params.get(b"cg.version")
1124 if cgversion is None:
1124 if cgversion is None:
1125 if bundle_spec.version == b'v1':
1125 if bundle_spec.version == b'v1':
1126 cgversion = b'01'
1126 cgversion = b'01'
1127 if bundle_spec.version == b'v2':
1127 if bundle_spec.version == b'v2':
1128 cgversion = b'02'
1128 cgversion = b'02'
1129 if cgversion not in changegroup.supportedoutgoingversions(repo):
1129 if cgversion not in changegroup.supportedoutgoingversions(repo):
1130 err = b"repository does not support bundle version %s"
1130 err = b"repository does not support bundle version %s"
1131 raise error.Abort(err % cgversion)
1131 raise error.Abort(err % cgversion)
1132
1132
1133 if cgversion == b'01': # bundle1
1133 if cgversion == b'01': # bundle1
1134 bversion = b'HG10' + bundle_spec.wirecompression
1134 bversion = b'HG10' + bundle_spec.wirecompression
1135 bcompression = None
1135 bcompression = None
1136 elif cgversion in (b'02', b'03'):
1136 elif cgversion in (b'02', b'03'):
1137 bversion = b'HG20'
1137 bversion = b'HG20'
1138 bcompression = bundle_spec.wirecompression
1138 bcompression = bundle_spec.wirecompression
1139 else:
1139 else:
1140 err = b'perf::bundle: unexpected changegroup version %s'
1140 err = b'perf::bundle: unexpected changegroup version %s'
1141 raise error.ProgrammingError(err % cgversion)
1141 raise error.ProgrammingError(err % cgversion)
1142
1142
1143 if bcompression is None:
1143 if bcompression is None:
1144 bcompression = b'UN'
1144 bcompression = b'UN'
1145
1145
1146 if bcompression != b'UN':
1146 if bcompression != b'UN':
1147 err = b'perf::bundle: compression currently unsupported: %s'
1147 err = b'perf::bundle: compression currently unsupported: %s'
1148 raise error.ProgrammingError(err % bcompression)
1148 raise error.ProgrammingError(err % bcompression)
1149
1149
1150 def do_bundle():
1150 def do_bundle():
1151 bundle2.writenewbundle(
1151 bundle2.writenewbundle(
1152 ui,
1152 ui,
1153 repo,
1153 repo,
1154 b'perf::bundle',
1154 b'perf::bundle',
1155 os.devnull,
1155 os.devnull,
1156 bversion,
1156 bversion,
1157 outgoing,
1157 outgoing,
1158 bundle_spec.params,
1158 bundle_spec.params,
1159 )
1159 )
1160
1160
1161 timer(do_bundle)
1161 timer(do_bundle)
1162 fm.end()
1162 fm.end()
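# The "consistent set" step above relies on the `%ld::%ld` revset to close the
# requested revisions over the DAG. As a rough, self-contained sketch of that
# idea (using an invented parents map instead of a real repository and revset
# engine), the closure keeps every revision that is both a descendant and an
# ancestor of some requested revision:

def toy_dag_range(parents, revs):
    """Return `revs` plus every revision topologically between them."""
    revs = set(revs)
    ancestors = set()
    stack = list(revs)
    while stack:  # walk towards the roots
        r = stack.pop()
        if r not in ancestors:
            ancestors.add(r)
            stack.extend(parents.get(r, ()))
    children = {}
    for r, ps in parents.items():
        for p in ps:
            children.setdefault(p, []).append(r)
    descendants = set()
    stack = list(revs)
    while stack:  # walk towards the heads
        r = stack.pop()
        if r not in descendants:
            descendants.add(r)
            stack.extend(children.get(r, ()))
    return sorted(ancestors & descendants)

# toy history: 0 <- 1 <- 2 <- 3, with 4 branching off 1
toy_parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [1]}
assert toy_dag_range(toy_parents, {1, 3}) == [1, 2, 3]

# A possible invocation (revisions and bundlespec are placeholders):
#   $ hg perf::bundle -r '1000::2000' -t none-v2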
1163
1163
1164
1164
1165 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1165 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1166 def perfbundleread(ui, repo, bundlepath, **opts):
1166 def perfbundleread(ui, repo, bundlepath, **opts):
1167 """Benchmark reading of bundle files.
1167 """Benchmark reading of bundle files.
1168
1168
1169 This command is meant to isolate the I/O part of bundle reading as
1169 This command is meant to isolate the I/O part of bundle reading as
1170 much as possible.
1170 much as possible.
1171 """
1171 """
1172 from mercurial import (
1172 from mercurial import (
1173 bundle2,
1173 bundle2,
1174 exchange,
1174 exchange,
1175 streamclone,
1175 streamclone,
1176 )
1176 )
1177
1177
1178 opts = _byteskwargs(opts)
1178 opts = _byteskwargs(opts)
1179
1179
1180 def makebench(fn):
1180 def makebench(fn):
1181 def run():
1181 def run():
1182 with open(bundlepath, b'rb') as fh:
1182 with open(bundlepath, b'rb') as fh:
1183 bundle = exchange.readbundle(ui, fh, bundlepath)
1183 bundle = exchange.readbundle(ui, fh, bundlepath)
1184 fn(bundle)
1184 fn(bundle)
1185
1185
1186 return run
1186 return run
1187
1187
1188 def makereadnbytes(size):
1188 def makereadnbytes(size):
1189 def run():
1189 def run():
1190 with open(bundlepath, b'rb') as fh:
1190 with open(bundlepath, b'rb') as fh:
1191 bundle = exchange.readbundle(ui, fh, bundlepath)
1191 bundle = exchange.readbundle(ui, fh, bundlepath)
1192 while bundle.read(size):
1192 while bundle.read(size):
1193 pass
1193 pass
1194
1194
1195 return run
1195 return run
1196
1196
1197 def makestdioread(size):
1197 def makestdioread(size):
1198 def run():
1198 def run():
1199 with open(bundlepath, b'rb') as fh:
1199 with open(bundlepath, b'rb') as fh:
1200 while fh.read(size):
1200 while fh.read(size):
1201 pass
1201 pass
1202
1202
1203 return run
1203 return run
1204
1204
1205 # bundle1
1205 # bundle1
1206
1206
1207 def deltaiter(bundle):
1207 def deltaiter(bundle):
1208 for delta in bundle.deltaiter():
1208 for delta in bundle.deltaiter():
1209 pass
1209 pass
1210
1210
1211 def iterchunks(bundle):
1211 def iterchunks(bundle):
1212 for chunk in bundle.getchunks():
1212 for chunk in bundle.getchunks():
1213 pass
1213 pass
1214
1214
1215 # bundle2
1215 # bundle2
1216
1216
1217 def forwardchunks(bundle):
1217 def forwardchunks(bundle):
1218 for chunk in bundle._forwardchunks():
1218 for chunk in bundle._forwardchunks():
1219 pass
1219 pass
1220
1220
1221 def iterparts(bundle):
1221 def iterparts(bundle):
1222 for part in bundle.iterparts():
1222 for part in bundle.iterparts():
1223 pass
1223 pass
1224
1224
1225 def iterpartsseekable(bundle):
1225 def iterpartsseekable(bundle):
1226 for part in bundle.iterparts(seekable=True):
1226 for part in bundle.iterparts(seekable=True):
1227 pass
1227 pass
1228
1228
1229 def seek(bundle):
1229 def seek(bundle):
1230 for part in bundle.iterparts(seekable=True):
1230 for part in bundle.iterparts(seekable=True):
1231 part.seek(0, os.SEEK_END)
1231 part.seek(0, os.SEEK_END)
1232
1232
1233 def makepartreadnbytes(size):
1233 def makepartreadnbytes(size):
1234 def run():
1234 def run():
1235 with open(bundlepath, b'rb') as fh:
1235 with open(bundlepath, b'rb') as fh:
1236 bundle = exchange.readbundle(ui, fh, bundlepath)
1236 bundle = exchange.readbundle(ui, fh, bundlepath)
1237 for part in bundle.iterparts():
1237 for part in bundle.iterparts():
1238 while part.read(size):
1238 while part.read(size):
1239 pass
1239 pass
1240
1240
1241 return run
1241 return run
1242
1242
1243 benches = [
1243 benches = [
1244 (makestdioread(8192), b'read(8k)'),
1244 (makestdioread(8192), b'read(8k)'),
1245 (makestdioread(16384), b'read(16k)'),
1245 (makestdioread(16384), b'read(16k)'),
1246 (makestdioread(32768), b'read(32k)'),
1246 (makestdioread(32768), b'read(32k)'),
1247 (makestdioread(131072), b'read(128k)'),
1247 (makestdioread(131072), b'read(128k)'),
1248 ]
1248 ]
1249
1249
1250 with open(bundlepath, b'rb') as fh:
1250 with open(bundlepath, b'rb') as fh:
1251 bundle = exchange.readbundle(ui, fh, bundlepath)
1251 bundle = exchange.readbundle(ui, fh, bundlepath)
1252
1252
1253 if isinstance(bundle, changegroup.cg1unpacker):
1253 if isinstance(bundle, changegroup.cg1unpacker):
1254 benches.extend(
1254 benches.extend(
1255 [
1255 [
1256 (makebench(deltaiter), b'cg1 deltaiter()'),
1256 (makebench(deltaiter), b'cg1 deltaiter()'),
1257 (makebench(iterchunks), b'cg1 getchunks()'),
1257 (makebench(iterchunks), b'cg1 getchunks()'),
1258 (makereadnbytes(8192), b'cg1 read(8k)'),
1258 (makereadnbytes(8192), b'cg1 read(8k)'),
1259 (makereadnbytes(16384), b'cg1 read(16k)'),
1259 (makereadnbytes(16384), b'cg1 read(16k)'),
1260 (makereadnbytes(32768), b'cg1 read(32k)'),
1260 (makereadnbytes(32768), b'cg1 read(32k)'),
1261 (makereadnbytes(131072), b'cg1 read(128k)'),
1261 (makereadnbytes(131072), b'cg1 read(128k)'),
1262 ]
1262 ]
1263 )
1263 )
1264 elif isinstance(bundle, bundle2.unbundle20):
1264 elif isinstance(bundle, bundle2.unbundle20):
1265 benches.extend(
1265 benches.extend(
1266 [
1266 [
1267 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1267 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1268 (makebench(iterparts), b'bundle2 iterparts()'),
1268 (makebench(iterparts), b'bundle2 iterparts()'),
1269 (
1269 (
1270 makebench(iterpartsseekable),
1270 makebench(iterpartsseekable),
1271 b'bundle2 iterparts() seekable',
1271 b'bundle2 iterparts() seekable',
1272 ),
1272 ),
1273 (makebench(seek), b'bundle2 part seek()'),
1273 (makebench(seek), b'bundle2 part seek()'),
1274 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1274 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1275 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1275 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1276 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1276 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1277 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1277 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1278 ]
1278 ]
1279 )
1279 )
1280 elif isinstance(bundle, streamclone.streamcloneapplier):
1280 elif isinstance(bundle, streamclone.streamcloneapplier):
1281 raise error.Abort(b'stream clone bundles not supported')
1281 raise error.Abort(b'stream clone bundles not supported')
1282 else:
1282 else:
1283 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1283 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1284
1284
1285 for fn, title in benches:
1285 for fn, title in benches:
1286 timer, fm = gettimer(ui, opts)
1286 timer, fm = gettimer(ui, opts)
1287 timer(fn, title=title)
1287 timer(fn, title=title)
1288 fm.end()
1288 fm.end()
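# The read(8k)/read(16k)/... variants above all share the same "read fixed-size
# chunks until exhausted" loop. A standalone sketch of that loop over an
# in-memory buffer (sizes and data are invented for illustration):

import io

def drain_in_chunks(data, size):
    """Consume `data` in fixed-size chunks, mirroring makestdioread/makereadnbytes."""
    fh = io.BytesIO(data)
    chunks = 0
    while fh.read(size):
        chunks += 1
    return chunks

sample = b'x' * (1024 * 1024)  # 1 MiB of dummy data
for size in (8192, 16384, 32768, 131072):
    drain_in_chunks(sample, size)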
1289
1289
1290
1290
1291 @command(
1291 @command(
1292 b'perf::changegroupchangelog|perfchangegroupchangelog',
1292 b'perf::changegroupchangelog|perfchangegroupchangelog',
1293 formatteropts
1293 formatteropts
1294 + [
1294 + [
1295 (b'', b'cgversion', b'02', b'changegroup version'),
1295 (b'', b'cgversion', b'02', b'changegroup version'),
1296 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1296 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1297 ],
1297 ],
1298 )
1298 )
1299 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1299 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1300 """Benchmark producing a changelog group for a changegroup.
1300 """Benchmark producing a changelog group for a changegroup.
1301
1301
1302 This measures the time spent processing the changelog during a
1302 This measures the time spent processing the changelog during a
1303 bundle operation. This occurs during `hg bundle` and on a server
1303 bundle operation. This occurs during `hg bundle` and on a server
1304 processing a `getbundle` wire protocol request (handles clones
1304 processing a `getbundle` wire protocol request (handles clones
1305 and pull requests).
1305 and pull requests).
1306
1306
1307 By default, all revisions are added to the changegroup.
1307 By default, all revisions are added to the changegroup.
1308 """
1308 """
1309 opts = _byteskwargs(opts)
1309 opts = _byteskwargs(opts)
1310 cl = repo.changelog
1310 cl = repo.changelog
1311 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1311 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1312 bundler = changegroup.getbundler(cgversion, repo)
1312 bundler = changegroup.getbundler(cgversion, repo)
1313
1313
1314 def d():
1314 def d():
1315 state, chunks = bundler._generatechangelog(cl, nodes)
1315 state, chunks = bundler._generatechangelog(cl, nodes)
1316 for chunk in chunks:
1316 for chunk in chunks:
1317 pass
1317 pass
1318
1318
1319 timer, fm = gettimer(ui, opts)
1319 timer, fm = gettimer(ui, opts)
1320
1320
1321 # Terminal printing can interfere with timing. So disable it.
1321 # Terminal printing can interfere with timing. So disable it.
1322 with ui.configoverride({(b'progress', b'disable'): True}):
1322 with ui.configoverride({(b'progress', b'disable'): True}):
1323 timer(d)
1323 timer(d)
1324
1324
1325 fm.end()
1325 fm.end()
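# For illustration only: a possible invocation; the revset is a placeholder.
#
#   $ hg perf::changegroupchangelog --cgversion 02 --rev '0:1000'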
1326
1326
1327
1327
1328 @command(b'perf::dirs|perfdirs', formatteropts)
1328 @command(b'perf::dirs|perfdirs', formatteropts)
1329 def perfdirs(ui, repo, **opts):
1329 def perfdirs(ui, repo, **opts):
1330 opts = _byteskwargs(opts)
1330 opts = _byteskwargs(opts)
1331 timer, fm = gettimer(ui, opts)
1331 timer, fm = gettimer(ui, opts)
1332 dirstate = repo.dirstate
1332 dirstate = repo.dirstate
1333 b'a' in dirstate
1333 b'a' in dirstate
1334
1334
1335 def d():
1335 def d():
1336 dirstate.hasdir(b'a')
1336 dirstate.hasdir(b'a')
1337 try:
1337 try:
1338 del dirstate._map._dirs
1338 del dirstate._map._dirs
1339 except AttributeError:
1339 except AttributeError:
1340 pass
1340 pass
1341
1341
1342 timer(d)
1342 timer(d)
1343 fm.end()
1343 fm.end()
1344
1344
1345
1345
1346 @command(
1346 @command(
1347 b'perf::dirstate|perfdirstate',
1347 b'perf::dirstate|perfdirstate',
1348 [
1348 [
1349 (
1349 (
1350 b'',
1350 b'',
1351 b'iteration',
1351 b'iteration',
1352 None,
1352 None,
1353 b'benchmark a full iteration for the dirstate',
1353 b'benchmark a full iteration for the dirstate',
1354 ),
1354 ),
1355 (
1355 (
1356 b'',
1356 b'',
1357 b'contains',
1357 b'contains',
1358 None,
1358 None,
1359 b'benchmark a large amount of `nf in dirstate` calls',
1359 b'benchmark a large amount of `nf in dirstate` calls',
1360 ),
1360 ),
1361 ]
1361 ]
1362 + formatteropts,
1362 + formatteropts,
1363 )
1363 )
1364 def perfdirstate(ui, repo, **opts):
1364 def perfdirstate(ui, repo, **opts):
1365 """benchmap the time of various distate operations
1365 """benchmap the time of various distate operations
1366
1366
1367 By default benchmark the time necessary to load a dirstate from scratch.
1367 By default benchmark the time necessary to load a dirstate from scratch.
1368 The dirstate is loaded to the point where a "contains" request can be
1368 The dirstate is loaded to the point where a "contains" request can be
1369 answered.
1369 answered.
1370 """
1370 """
1371 opts = _byteskwargs(opts)
1371 opts = _byteskwargs(opts)
1372 timer, fm = gettimer(ui, opts)
1372 timer, fm = gettimer(ui, opts)
1373 b"a" in repo.dirstate
1373 b"a" in repo.dirstate
1374
1374
1375 if opts[b'iteration'] and opts[b'contains']:
1375 if opts[b'iteration'] and opts[b'contains']:
1376 msg = b'only specify one of --iteration or --contains'
1376 msg = b'only specify one of --iteration or --contains'
1377 raise error.Abort(msg)
1377 raise error.Abort(msg)
1378
1378
1379 if opts[b'iteration']:
1379 if opts[b'iteration']:
1380 setup = None
1380 setup = None
1381 dirstate = repo.dirstate
1381 dirstate = repo.dirstate
1382
1382
1383 def d():
1383 def d():
1384 for f in dirstate:
1384 for f in dirstate:
1385 pass
1385 pass
1386
1386
1387 elif opts[b'contains']:
1387 elif opts[b'contains']:
1388 setup = None
1388 setup = None
1389 dirstate = repo.dirstate
1389 dirstate = repo.dirstate
1390 allfiles = list(dirstate)
1390 allfiles = list(dirstate)
1391 # also add file path that will be "missing" from the dirstate
1391 # also add file path that will be "missing" from the dirstate
1392 allfiles.extend([f[::-1] for f in allfiles])
1392 allfiles.extend([f[::-1] for f in allfiles])
1393
1393
1394 def d():
1394 def d():
1395 for f in allfiles:
1395 for f in allfiles:
1396 f in dirstate
1396 f in dirstate
1397
1397
1398 else:
1398 else:
1399
1399
1400 def setup():
1400 def setup():
1401 repo.dirstate.invalidate()
1401 repo.dirstate.invalidate()
1402
1402
1403 def d():
1403 def d():
1404 b"a" in repo.dirstate
1404 b"a" in repo.dirstate
1405
1405
1406 timer(d, setup=setup)
1406 timer(d, setup=setup)
1407 fm.end()
1407 fm.end()
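# The --contains branch above doubles the probe set with reversed paths so that
# roughly half of the lookups are guaranteed misses. The same trick in
# isolation, with an invented file set:

tracked = {b'a/b.txt', b'src/main.py', b'README'}
probes = list(tracked)
# reversed byte strings are (almost certainly) not tracked, giving cheap misses
probes.extend(p[::-1] for p in probes)

hits = sum(1 for p in probes if p in tracked)
misses = len(probes) - hits
assert (hits, misses) == (3, 3)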
1408
1408
1409
1409
1410 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1410 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1411 def perfdirstatedirs(ui, repo, **opts):
1411 def perfdirstatedirs(ui, repo, **opts):
1412 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1412 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1413 opts = _byteskwargs(opts)
1413 opts = _byteskwargs(opts)
1414 timer, fm = gettimer(ui, opts)
1414 timer, fm = gettimer(ui, opts)
1415 repo.dirstate.hasdir(b"a")
1415 repo.dirstate.hasdir(b"a")
1416
1416
1417 def setup():
1417 def setup():
1418 try:
1418 try:
1419 del repo.dirstate._map._dirs
1419 del repo.dirstate._map._dirs
1420 except AttributeError:
1420 except AttributeError:
1421 pass
1421 pass
1422
1422
1423 def d():
1423 def d():
1424 repo.dirstate.hasdir(b"a")
1424 repo.dirstate.hasdir(b"a")
1425
1425
1426 timer(d, setup=setup)
1426 timer(d, setup=setup)
1427 fm.end()
1427 fm.end()
1428
1428
1429
1429
1430 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1430 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1431 def perfdirstatefoldmap(ui, repo, **opts):
1431 def perfdirstatefoldmap(ui, repo, **opts):
1432 """benchmap a `dirstate._map.filefoldmap.get()` request
1432 """benchmap a `dirstate._map.filefoldmap.get()` request
1433
1433
1434 The dirstate filefoldmap cache is dropped between every request.
1434 The dirstate filefoldmap cache is dropped between every request.
1435 """
1435 """
1436 opts = _byteskwargs(opts)
1436 opts = _byteskwargs(opts)
1437 timer, fm = gettimer(ui, opts)
1437 timer, fm = gettimer(ui, opts)
1438 dirstate = repo.dirstate
1438 dirstate = repo.dirstate
1439 dirstate._map.filefoldmap.get(b'a')
1439 dirstate._map.filefoldmap.get(b'a')
1440
1440
1441 def setup():
1441 def setup():
1442 del dirstate._map.filefoldmap
1442 del dirstate._map.filefoldmap
1443
1443
1444 def d():
1444 def d():
1445 dirstate._map.filefoldmap.get(b'a')
1445 dirstate._map.filefoldmap.get(b'a')
1446
1446
1447 timer(d, setup=setup)
1447 timer(d, setup=setup)
1448 fm.end()
1448 fm.end()
1449
1449
1450
1450
1451 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1451 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1452 def perfdirfoldmap(ui, repo, **opts):
1452 def perfdirfoldmap(ui, repo, **opts):
1453 """benchmap a `dirstate._map.dirfoldmap.get()` request
1453 """benchmap a `dirstate._map.dirfoldmap.get()` request
1454
1454
1455 The dirstate dirfoldmap cache is dropped between every request.
1455 The dirstate dirfoldmap cache is dropped between every request.
1456 """
1456 """
1457 opts = _byteskwargs(opts)
1457 opts = _byteskwargs(opts)
1458 timer, fm = gettimer(ui, opts)
1458 timer, fm = gettimer(ui, opts)
1459 dirstate = repo.dirstate
1459 dirstate = repo.dirstate
1460 dirstate._map.dirfoldmap.get(b'a')
1460 dirstate._map.dirfoldmap.get(b'a')
1461
1461
1462 def setup():
1462 def setup():
1463 del dirstate._map.dirfoldmap
1463 del dirstate._map.dirfoldmap
1464 try:
1464 try:
1465 del dirstate._map._dirs
1465 del dirstate._map._dirs
1466 except AttributeError:
1466 except AttributeError:
1467 pass
1467 pass
1468
1468
1469 def d():
1469 def d():
1470 dirstate._map.dirfoldmap.get(b'a')
1470 dirstate._map.dirfoldmap.get(b'a')
1471
1471
1472 timer(d, setup=setup)
1472 timer(d, setup=setup)
1473 fm.end()
1473 fm.end()
1474
1474
1475
1475
1476 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1476 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1477 def perfdirstatewrite(ui, repo, **opts):
1477 def perfdirstatewrite(ui, repo, **opts):
1478 """benchmap the time it take to write a dirstate on disk"""
1478 """benchmap the time it take to write a dirstate on disk"""
1479 opts = _byteskwargs(opts)
1479 opts = _byteskwargs(opts)
1480 timer, fm = gettimer(ui, opts)
1480 timer, fm = gettimer(ui, opts)
1481 ds = repo.dirstate
1481 ds = repo.dirstate
1482 b"a" in ds
1482 b"a" in ds
1483
1483
1484 def setup():
1484 def setup():
1485 ds._dirty = True
1485 ds._dirty = True
1486
1486
1487 def d():
1487 def d():
1488 ds.write(repo.currenttransaction())
1488 ds.write(repo.currenttransaction())
1489
1489
1490 with repo.wlock():
1490 with repo.wlock():
1491 timer(d, setup=setup)
1491 timer(d, setup=setup)
1492 fm.end()
1492 fm.end()
1493
1493
1494
1494
1495 def _getmergerevs(repo, opts):
1495 def _getmergerevs(repo, opts):
1496 """parse command argument to return rev involved in merge
1496 """parse command argument to return rev involved in merge
1497
1497
1498 input: options dictionary with `rev`, `from` and `base`
1498 input: options dictionary with `rev`, `from` and `base`
1499 output: (localctx, otherctx, basectx)
1499 output: (localctx, otherctx, basectx)
1500 """
1500 """
1501 if opts[b'from']:
1501 if opts[b'from']:
1502 fromrev = scmutil.revsingle(repo, opts[b'from'])
1502 fromrev = scmutil.revsingle(repo, opts[b'from'])
1503 wctx = repo[fromrev]
1503 wctx = repo[fromrev]
1504 else:
1504 else:
1505 wctx = repo[None]
1505 wctx = repo[None]
1506 # we don't want working dir files to be stat'd in the benchmark, so
1506 # we don't want working dir files to be stat'd in the benchmark, so
1507 # prime that cache
1507 # prime that cache
1508 wctx.dirty()
1508 wctx.dirty()
1509 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1509 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1510 if opts[b'base']:
1510 if opts[b'base']:
1511 fromrev = scmutil.revsingle(repo, opts[b'base'])
1511 fromrev = scmutil.revsingle(repo, opts[b'base'])
1512 ancestor = repo[fromrev]
1512 ancestor = repo[fromrev]
1513 else:
1513 else:
1514 ancestor = wctx.ancestor(rctx)
1514 ancestor = wctx.ancestor(rctx)
1515 return (wctx, rctx, ancestor)
1515 return (wctx, rctx, ancestor)
1516
1516
1517
1517
1518 @command(
1518 @command(
1519 b'perf::mergecalculate|perfmergecalculate',
1519 b'perf::mergecalculate|perfmergecalculate',
1520 [
1520 [
1521 (b'r', b'rev', b'.', b'rev to merge against'),
1521 (b'r', b'rev', b'.', b'rev to merge against'),
1522 (b'', b'from', b'', b'rev to merge from'),
1522 (b'', b'from', b'', b'rev to merge from'),
1523 (b'', b'base', b'', b'the revision to use as base'),
1523 (b'', b'base', b'', b'the revision to use as base'),
1524 ]
1524 ]
1525 + formatteropts,
1525 + formatteropts,
1526 )
1526 )
1527 def perfmergecalculate(ui, repo, **opts):
1527 def perfmergecalculate(ui, repo, **opts):
1528 opts = _byteskwargs(opts)
1528 opts = _byteskwargs(opts)
1529 timer, fm = gettimer(ui, opts)
1529 timer, fm = gettimer(ui, opts)
1530
1530
1531 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1531 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1532
1532
1533 def d():
1533 def d():
1534 # acceptremote is True because we don't want prompts in the middle of
1534 # acceptremote is True because we don't want prompts in the middle of
1535 # our benchmark
1535 # our benchmark
1536 merge.calculateupdates(
1536 merge.calculateupdates(
1537 repo,
1537 repo,
1538 wctx,
1538 wctx,
1539 rctx,
1539 rctx,
1540 [ancestor],
1540 [ancestor],
1541 branchmerge=False,
1541 branchmerge=False,
1542 force=False,
1542 force=False,
1543 acceptremote=True,
1543 acceptremote=True,
1544 followcopies=True,
1544 followcopies=True,
1545 )
1545 )
1546
1546
1547 timer(d)
1547 timer(d)
1548 fm.end()
1548 fm.end()
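# For illustration only: possible invocations; the revision names are
# placeholders.
#
#   $ hg perf::mergecalculate -r other-branch
#   $ hg perf::mergecalculate --from v1.0 -r v2.0 --base v0.9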
1549
1549
1550
1550
1551 @command(
1551 @command(
1552 b'perf::mergecopies|perfmergecopies',
1552 b'perf::mergecopies|perfmergecopies',
1553 [
1553 [
1554 (b'r', b'rev', b'.', b'rev to merge against'),
1554 (b'r', b'rev', b'.', b'rev to merge against'),
1555 (b'', b'from', b'', b'rev to merge from'),
1555 (b'', b'from', b'', b'rev to merge from'),
1556 (b'', b'base', b'', b'the revision to use as base'),
1556 (b'', b'base', b'', b'the revision to use as base'),
1557 ]
1557 ]
1558 + formatteropts,
1558 + formatteropts,
1559 )
1559 )
1560 def perfmergecopies(ui, repo, **opts):
1560 def perfmergecopies(ui, repo, **opts):
1561 """measure runtime of `copies.mergecopies`"""
1561 """measure runtime of `copies.mergecopies`"""
1562 opts = _byteskwargs(opts)
1562 opts = _byteskwargs(opts)
1563 timer, fm = gettimer(ui, opts)
1563 timer, fm = gettimer(ui, opts)
1564 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1564 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1565
1565
1566 def d():
1566 def d():
1567 # acceptremote is True because we don't want prompts in the middle of
1567 # acceptremote is True because we don't want prompts in the middle of
1568 # our benchmark
1568 # our benchmark
1569 copies.mergecopies(repo, wctx, rctx, ancestor)
1569 copies.mergecopies(repo, wctx, rctx, ancestor)
1570
1570
1571 timer(d)
1571 timer(d)
1572 fm.end()
1572 fm.end()
1573
1573
1574
1574
1575 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1575 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1576 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1576 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1577 """benchmark the copy tracing logic"""
1577 """benchmark the copy tracing logic"""
1578 opts = _byteskwargs(opts)
1578 opts = _byteskwargs(opts)
1579 timer, fm = gettimer(ui, opts)
1579 timer, fm = gettimer(ui, opts)
1580 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1580 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1581 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1581 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1582
1582
1583 def d():
1583 def d():
1584 copies.pathcopies(ctx1, ctx2)
1584 copies.pathcopies(ctx1, ctx2)
1585
1585
1586 timer(d)
1586 timer(d)
1587 fm.end()
1587 fm.end()
1588
1588
1589
1589
1590 @command(
1590 @command(
1591 b'perf::phases|perfphases',
1591 b'perf::phases|perfphases',
1592 [
1592 [
1593 (b'', b'full', False, b'include file reading time too'),
1593 (b'', b'full', False, b'include file reading time too'),
1594 ],
1594 ],
1595 b"",
1595 b"",
1596 )
1596 )
1597 def perfphases(ui, repo, **opts):
1597 def perfphases(ui, repo, **opts):
1598 """benchmark phasesets computation"""
1598 """benchmark phasesets computation"""
1599 opts = _byteskwargs(opts)
1599 opts = _byteskwargs(opts)
1600 timer, fm = gettimer(ui, opts)
1600 timer, fm = gettimer(ui, opts)
1601 _phases = repo._phasecache
1601 _phases = repo._phasecache
1602 full = opts.get(b'full')
1602 full = opts.get(b'full')
1603
1603
1604 def d():
1604 def d():
1605 phases = _phases
1605 phases = _phases
1606 if full:
1606 if full:
1607 clearfilecache(repo, b'_phasecache')
1607 clearfilecache(repo, b'_phasecache')
1608 phases = repo._phasecache
1608 phases = repo._phasecache
1609 phases.invalidate()
1609 phases.invalidate()
1610 phases.loadphaserevs(repo)
1610 phases.loadphaserevs(repo)
1611
1611
1612 timer(d)
1612 timer(d)
1613 fm.end()
1613 fm.end()
1614
1614
1615
1615
1616 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1616 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1617 def perfphasesremote(ui, repo, dest=None, **opts):
1617 def perfphasesremote(ui, repo, dest=None, **opts):
1618 """benchmark time needed to analyse phases of the remote server"""
1618 """benchmark time needed to analyse phases of the remote server"""
1619 from mercurial.node import bin
1619 from mercurial.node import bin
1620 from mercurial import (
1620 from mercurial import (
1621 exchange,
1621 exchange,
1622 hg,
1622 hg,
1623 phases,
1623 phases,
1624 )
1624 )
1625
1625
1626 opts = _byteskwargs(opts)
1626 opts = _byteskwargs(opts)
1627 timer, fm = gettimer(ui, opts)
1627 timer, fm = gettimer(ui, opts)
1628
1628
1629 path = ui.getpath(dest, default=(b'default-push', b'default'))
1629 path = ui.getpath(dest, default=(b'default-push', b'default'))
1630 if not path:
1630 if not path:
1631 raise error.Abort(
1631 raise error.Abort(
1632 b'default repository not configured!',
1632 b'default repository not configured!',
1633 hint=b"see 'hg help config.paths'",
1633 hint=b"see 'hg help config.paths'",
1634 )
1634 )
1635 if util.safehasattr(path, 'main_path'):
1635 if util.safehasattr(path, 'main_path'):
1636 path = path.get_push_variant()
1636 path = path.get_push_variant()
1637 dest = path.loc
1637 dest = path.loc
1638 else:
1638 else:
1639 dest = path.pushloc or path.loc
1639 dest = path.pushloc or path.loc
1640 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1640 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1641 other = hg.peer(repo, opts, dest)
1641 other = hg.peer(repo, opts, dest)
1642
1642
1643 # easier to perform discovery through the operation
1643 # easier to perform discovery through the operation
1644 op = exchange.pushoperation(repo, other)
1644 op = exchange.pushoperation(repo, other)
1645 exchange._pushdiscoverychangeset(op)
1645 exchange._pushdiscoverychangeset(op)
1646
1646
1647 remotesubset = op.fallbackheads
1647 remotesubset = op.fallbackheads
1648
1648
1649 with other.commandexecutor() as e:
1649 with other.commandexecutor() as e:
1650 remotephases = e.callcommand(
1650 remotephases = e.callcommand(
1651 b'listkeys', {b'namespace': b'phases'}
1651 b'listkeys', {b'namespace': b'phases'}
1652 ).result()
1652 ).result()
1653 del other
1653 del other
1654 publishing = remotephases.get(b'publishing', False)
1654 publishing = remotephases.get(b'publishing', False)
1655 if publishing:
1655 if publishing:
1656 ui.statusnoi18n(b'publishing: yes\n')
1656 ui.statusnoi18n(b'publishing: yes\n')
1657 else:
1657 else:
1658 ui.statusnoi18n(b'publishing: no\n')
1658 ui.statusnoi18n(b'publishing: no\n')
1659
1659
1660 has_node = getattr(repo.changelog.index, 'has_node', None)
1660 has_node = getattr(repo.changelog.index, 'has_node', None)
1661 if has_node is None:
1661 if has_node is None:
1662 has_node = repo.changelog.nodemap.__contains__
1662 has_node = repo.changelog.nodemap.__contains__
1663 nonpublishroots = 0
1663 nonpublishroots = 0
1664 for nhex, phase in remotephases.iteritems():
1664 for nhex, phase in remotephases.iteritems():
1665 if nhex == b'publishing': # ignore data related to publish option
1665 if nhex == b'publishing': # ignore data related to publish option
1666 continue
1666 continue
1667 node = bin(nhex)
1667 node = bin(nhex)
1668 if has_node(node) and int(phase):
1668 if has_node(node) and int(phase):
1669 nonpublishroots += 1
1669 nonpublishroots += 1
1670 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1670 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1671 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1671 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1672
1672
1673 def d():
1673 def d():
1674 phases.remotephasessummary(repo, remotesubset, remotephases)
1674 phases.remotephasessummary(repo, remotesubset, remotephases)
1675
1675
1676 timer(d)
1676 timer(d)
1677 fm.end()
1677 fm.end()
1678
1678
1679
1679
1680 @command(
1680 @command(
1681 b'perf::manifest|perfmanifest',
1681 b'perf::manifest|perfmanifest',
1682 [
1682 [
1683 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1683 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1684 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1684 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1685 ]
1685 ]
1686 + formatteropts,
1686 + formatteropts,
1687 b'REV|NODE',
1687 b'REV|NODE',
1688 )
1688 )
1689 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1689 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1690 """benchmark the time to read a manifest from disk and return a usable
1690 """benchmark the time to read a manifest from disk and return a usable
1691 dict-like object
1691 dict-like object
1692
1692
1693 Manifest caches are cleared before retrieval."""
1693 Manifest caches are cleared before retrieval."""
1694 opts = _byteskwargs(opts)
1694 opts = _byteskwargs(opts)
1695 timer, fm = gettimer(ui, opts)
1695 timer, fm = gettimer(ui, opts)
1696 if not manifest_rev:
1696 if not manifest_rev:
1697 ctx = scmutil.revsingle(repo, rev, rev)
1697 ctx = scmutil.revsingle(repo, rev, rev)
1698 t = ctx.manifestnode()
1698 t = ctx.manifestnode()
1699 else:
1699 else:
1700 from mercurial.node import bin
1700 from mercurial.node import bin
1701
1701
1702 if len(rev) == 40:
1702 if len(rev) == 40:
1703 t = bin(rev)
1703 t = bin(rev)
1704 else:
1704 else:
1705 try:
1705 try:
1706 rev = int(rev)
1706 rev = int(rev)
1707
1707
1708 if util.safehasattr(repo.manifestlog, b'getstorage'):
1708 if util.safehasattr(repo.manifestlog, b'getstorage'):
1709 t = repo.manifestlog.getstorage(b'').node(rev)
1709 t = repo.manifestlog.getstorage(b'').node(rev)
1710 else:
1710 else:
1711 t = repo.manifestlog._revlog.lookup(rev)
1711 t = repo.manifestlog._revlog.lookup(rev)
1712 except ValueError:
1712 except ValueError:
1713 raise error.Abort(
1713 raise error.Abort(
1714 b'manifest revision must be integer or full node'
1714 b'manifest revision must be integer or full node'
1715 )
1715 )
1716
1716
1717 def d():
1717 def d():
1718 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1718 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1719 repo.manifestlog[t].read()
1719 repo.manifestlog[t].read()
1720
1720
1721 timer(d)
1721 timer(d)
1722 fm.end()
1722 fm.end()
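# For illustration only: possible invocations, by changeset revision, by
# manifest revision number (with -m), and with on-disk caches cleared as well.
# The revision values are placeholders.
#
#   $ hg perf::manifest tip
#   $ hg perf::manifest -m 100
#   $ hg perf::manifest --clear-disk tip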
1723
1723
1724
1724
1725 @command(b'perf::changeset|perfchangeset', formatteropts)
1725 @command(b'perf::changeset|perfchangeset', formatteropts)
1726 def perfchangeset(ui, repo, rev, **opts):
1726 def perfchangeset(ui, repo, rev, **opts):
1727 opts = _byteskwargs(opts)
1727 opts = _byteskwargs(opts)
1728 timer, fm = gettimer(ui, opts)
1728 timer, fm = gettimer(ui, opts)
1729 n = scmutil.revsingle(repo, rev).node()
1729 n = scmutil.revsingle(repo, rev).node()
1730
1730
1731 def d():
1731 def d():
1732 repo.changelog.read(n)
1732 repo.changelog.read(n)
1733 # repo.changelog._cache = None
1733 # repo.changelog._cache = None
1734
1734
1735 timer(d)
1735 timer(d)
1736 fm.end()
1736 fm.end()
1737
1737
1738
1738
1739 @command(b'perf::ignore|perfignore', formatteropts)
1739 @command(b'perf::ignore|perfignore', formatteropts)
1740 def perfignore(ui, repo, **opts):
1740 def perfignore(ui, repo, **opts):
1741 """benchmark operation related to computing ignore"""
1741 """benchmark operation related to computing ignore"""
1742 opts = _byteskwargs(opts)
1742 opts = _byteskwargs(opts)
1743 timer, fm = gettimer(ui, opts)
1743 timer, fm = gettimer(ui, opts)
1744 dirstate = repo.dirstate
1744 dirstate = repo.dirstate
1745
1745
1746 def setupone():
1746 def setupone():
1747 dirstate.invalidate()
1747 dirstate.invalidate()
1748 clearfilecache(dirstate, b'_ignore')
1748 clearfilecache(dirstate, b'_ignore')
1749
1749
1750 def runone():
1750 def runone():
1751 dirstate._ignore
1751 dirstate._ignore
1752
1752
1753 timer(runone, setup=setupone, title=b"load")
1753 timer(runone, setup=setupone, title=b"load")
1754 fm.end()
1754 fm.end()
1755
1755
1756
1756
1757 @command(
1757 @command(
1758 b'perf::index|perfindex',
1758 b'perf::index|perfindex',
1759 [
1759 [
1760 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1760 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1761 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1761 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1762 ]
1762 ]
1763 + formatteropts,
1763 + formatteropts,
1764 )
1764 )
1765 def perfindex(ui, repo, **opts):
1765 def perfindex(ui, repo, **opts):
1766 """benchmark index creation time followed by a lookup
1766 """benchmark index creation time followed by a lookup
1767
1767
1768 The default is to look `tip` up. Depending on the index implementation,
1768 The default is to look `tip` up. Depending on the index implementation,
1769 the revision looked up can matter. For example, an implementation
1769 the revision looked up can matter. For example, an implementation
1770 scanning the index will have a faster lookup time for `--rev tip` than for
1770 scanning the index will have a faster lookup time for `--rev tip` than for
1771 `--rev 0`. The number of looked up revisions and their order can also
1771 `--rev 0`. The number of looked up revisions and their order can also
1772 matter.
1772 matter.
1773
1773
1774 Examples of useful sets to test:
1774 Examples of useful sets to test:
1775
1775
1776 * tip
1776 * tip
1777 * 0
1777 * 0
1778 * -10:
1778 * -10:
1779 * :10
1779 * :10
1780 * -10: + :10
1780 * -10: + :10
1781 * :10: + -10:
1781 * :10: + -10:
1782 * -10000:
1782 * -10000:
1783 * -10000: + 0
1783 * -10000: + 0
1784
1784
1785 It is not currently possible to check for lookup of a missing node. For
1785 It is not currently possible to check for lookup of a missing node. For
1786 deeper lookup benchmarking, check out the `perfnodemap` command."""
1786 deeper lookup benchmarking, check out the `perfnodemap` command."""
1787 import mercurial.revlog
1787 import mercurial.revlog
1788
1788
1789 opts = _byteskwargs(opts)
1789 opts = _byteskwargs(opts)
1790 timer, fm = gettimer(ui, opts)
1790 timer, fm = gettimer(ui, opts)
1791 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1791 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1792 if opts[b'no_lookup']:
1792 if opts[b'no_lookup']:
1793 if opts['rev']:
1793 if opts['rev']:
1794 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1794 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1795 nodes = []
1795 nodes = []
1796 elif not opts[b'rev']:
1796 elif not opts[b'rev']:
1797 nodes = [repo[b"tip"].node()]
1797 nodes = [repo[b"tip"].node()]
1798 else:
1798 else:
1799 revs = scmutil.revrange(repo, opts[b'rev'])
1799 revs = scmutil.revrange(repo, opts[b'rev'])
1800 cl = repo.changelog
1800 cl = repo.changelog
1801 nodes = [cl.node(r) for r in revs]
1801 nodes = [cl.node(r) for r in revs]
1802
1802
1803 unfi = repo.unfiltered()
1803 unfi = repo.unfiltered()
1804 # find the filecache func directly
1804 # find the filecache func directly
1805 # This avoids polluting the benchmark with the filecache logic
1805 # This avoids polluting the benchmark with the filecache logic
1806 makecl = unfi.__class__.changelog.func
1806 makecl = unfi.__class__.changelog.func
1807
1807
1808 def setup():
1808 def setup():
1809 # probably not necessary, but for good measure
1809 # probably not necessary, but for good measure
1810 clearchangelog(unfi)
1810 clearchangelog(unfi)
1811
1811
1812 def d():
1812 def d():
1813 cl = makecl(unfi)
1813 cl = makecl(unfi)
1814 for n in nodes:
1814 for n in nodes:
1815 cl.rev(n)
1815 cl.rev(n)
1816
1816
1817 timer(d, setup=setup)
1817 timer(d, setup=setup)
1818 fm.end()
1818 fm.end()
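# For illustration only: possible invocations built from the revision sets
# suggested in the docstring above.
#
#   $ hg perf::index
#   $ hg perf::index --rev 0
#   $ hg perf::index --rev '-10000:' --rev 0
#   $ hg perf::index --no-lookup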
1819
1819
1820
1820
1821 @command(
1821 @command(
1822 b'perf::nodemap|perfnodemap',
1822 b'perf::nodemap|perfnodemap',
1823 [
1823 [
1824 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1824 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1825 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1825 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1826 ]
1826 ]
1827 + formatteropts,
1827 + formatteropts,
1828 )
1828 )
1829 def perfnodemap(ui, repo, **opts):
1829 def perfnodemap(ui, repo, **opts):
1830 """benchmark the time necessary to look up revision from a cold nodemap
1830 """benchmark the time necessary to look up revision from a cold nodemap
1831
1831
1832 Depending on the implementation, the amount and order of revisions we look
1832 Depending on the implementation, the amount and order of revisions we look
1833 up can vary. Examples of useful sets to test:
1833 up can vary. Examples of useful sets to test:
1834 * tip
1834 * tip
1835 * 0
1835 * 0
1836 * -10:
1836 * -10:
1837 * :10
1837 * :10
1838 * -10: + :10
1838 * -10: + :10
1839 * :10: + -10:
1839 * :10: + -10:
1840 * -10000:
1840 * -10000:
1841 * -10000: + 0
1841 * -10000: + 0
1842
1842
1843 The command currently focuses on valid binary lookup. Benchmarking for
1843 The command currently focuses on valid binary lookup. Benchmarking for
1844 hexlookup, prefix lookup and missing lookup would also be valuable.
1844 hexlookup, prefix lookup and missing lookup would also be valuable.
1845 """
1845 """
1846 import mercurial.revlog
1846 import mercurial.revlog
1847
1847
1848 opts = _byteskwargs(opts)
1848 opts = _byteskwargs(opts)
1849 timer, fm = gettimer(ui, opts)
1849 timer, fm = gettimer(ui, opts)
1850 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1850 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1851
1851
1852 unfi = repo.unfiltered()
1852 unfi = repo.unfiltered()
1853 clearcaches = opts[b'clear_caches']
1853 clearcaches = opts[b'clear_caches']
1854 # find the filecache func directly
1854 # find the filecache func directly
1855 # This avoids polluting the benchmark with the filecache logic
1855 # This avoids polluting the benchmark with the filecache logic
1856 makecl = unfi.__class__.changelog.func
1856 makecl = unfi.__class__.changelog.func
1857 if not opts[b'rev']:
1857 if not opts[b'rev']:
1858 raise error.Abort(b'use --rev to specify revisions to look up')
1858 raise error.Abort(b'use --rev to specify revisions to look up')
1859 revs = scmutil.revrange(repo, opts[b'rev'])
1859 revs = scmutil.revrange(repo, opts[b'rev'])
1860 cl = repo.changelog
1860 cl = repo.changelog
1861 nodes = [cl.node(r) for r in revs]
1861 nodes = [cl.node(r) for r in revs]
1862
1862
1863 # use a list to pass reference to a nodemap from one closure to the next
1863 # use a list to pass reference to a nodemap from one closure to the next
1864 nodeget = [None]
1864 nodeget = [None]
1865
1865
1866 def setnodeget():
1866 def setnodeget():
1867 # probably not necessary, but for good measure
1867 # probably not necessary, but for good measure
1868 clearchangelog(unfi)
1868 clearchangelog(unfi)
1869 cl = makecl(unfi)
1869 cl = makecl(unfi)
1870 if util.safehasattr(cl.index, 'get_rev'):
1870 if util.safehasattr(cl.index, 'get_rev'):
1871 nodeget[0] = cl.index.get_rev
1871 nodeget[0] = cl.index.get_rev
1872 else:
1872 else:
1873 nodeget[0] = cl.nodemap.get
1873 nodeget[0] = cl.nodemap.get
1874
1874
1875 def d():
1875 def d():
1876 get = nodeget[0]
1876 get = nodeget[0]
1877 for n in nodes:
1877 for n in nodes:
1878 get(n)
1878 get(n)
1879
1879
1880 setup = None
1880 setup = None
1881 if clearcaches:
1881 if clearcaches:
1882
1882
1883 def setup():
1883 def setup():
1884 setnodeget()
1884 setnodeget()
1885
1885
1886 else:
1886 else:
1887 setnodeget()
1887 setnodeget()
1888 d() # prewarm the data structure
1888 d() # prewarm the data structure
1889 timer(d, setup=setup)
1889 timer(d, setup=setup)
1890 fm.end()
1890 fm.end()
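# `nodeget = [None]` above is a small trick for sharing a late-bound callable
# between the setup closure and the timed closure. The same pattern in
# isolation, with an invented dict standing in for a freshly built nodemap:

lookup_holder = [None]  # one-element list shared by both closures

def nodemap_setup():
    index = {b'node-a': 0, b'node-b': 1}  # stand-in for a rebuilt nodemap
    lookup_holder[0] = index.get

def nodemap_run(nodes):
    get = lookup_holder[0]
    return [get(n) for n in nodes]

nodemap_setup()
assert nodemap_run([b'node-a', b'node-b', b'node-missing']) == [0, 1, None]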
1891
1891
1892
1892
1893 @command(b'perf::startup|perfstartup', formatteropts)
1893 @command(b'perf::startup|perfstartup', formatteropts)
1894 def perfstartup(ui, repo, **opts):
1894 def perfstartup(ui, repo, **opts):
1895 opts = _byteskwargs(opts)
1895 opts = _byteskwargs(opts)
1896 timer, fm = gettimer(ui, opts)
1896 timer, fm = gettimer(ui, opts)
1897
1897
1898 def d():
1898 def d():
1899 if os.name != 'nt':
1899 if os.name != 'nt':
1900 os.system(
1900 os.system(
1901 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1901 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1902 )
1902 )
1903 else:
1903 else:
1904 os.environ['HGRCPATH'] = r' '
1904 os.environ['HGRCPATH'] = r' '
1905 os.system("%s version -q > NUL" % sys.argv[0])
1905 os.system("%s version -q > NUL" % sys.argv[0])
1906
1906
1907 timer(d)
1907 timer(d)
1908 fm.end()
1908 fm.end()
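# A rough, standalone equivalent of the measurement above using subprocess
# instead of os.system; it assumes an `hg` executable is on PATH and, like the
# original, neutralises HGRCPATH so config loading does not dominate.

import os
import subprocess
import time

def time_hg_startup(hg='hg'):
    """Time one cold start of `hg version -q`."""
    env = dict(os.environ, HGRCPATH='')
    start = time.perf_counter()
    subprocess.run(
        [hg, 'version', '-q'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        env=env,
        check=False,
    )
    return time.perf_counter() - start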
1909
1909
1910
1910
1911 def _find_stream_generator(version):
1911 def _find_stream_generator(version):
1912 """find the proper generator function for this stream version"""
1912 """find the proper generator function for this stream version"""
1913 import mercurial.streamclone
1913 import mercurial.streamclone
1914
1914
1915 available = {}
1915 available = {}
1916
1916
1917 # try to fetch a v1 generator
1917 # try to fetch a v1 generator
1918 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
1918 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
1919 if generatev1 is not None:
1919 if generatev1 is not None:
1920
1920
1921 def generate(repo):
1921 def generate(repo):
1922 entries, bytes, data = generatev1(repo)
1922 entries, bytes, data = generatev1(repo)
1923 return data
1923 return data
1924
1924
1925 available[b'v1'] = generate
1925 available[b'v1'] = generate
1926 # try to fetch a v2 generator
1926 # try to fetch a v2 generator
1927 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
1927 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
1928 if generatev2 is not None:
1928 if generatev2 is not None:
1929
1929
1930 def generate(repo):
1930 def generate(repo):
1931 entries, bytes, data = generatev2(repo, None, None, True)
1931 entries, bytes, data = generatev2(repo, None, None, True)
1932 return data
1932 return data
1933
1933
1934 available[b'v2'] = generate
1934 available[b'v2'] = generate
1935 # try to fetch a v3 generator
1935 # try to fetch a v3 generator
1936 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
1936 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
1937 if generatev3 is not None:
1937 if generatev3 is not None:
1938
1938
1939 def generate(repo):
1939 def generate(repo):
1940 entries, bytes, data = generatev3(repo, None, None, True)
1940 entries, bytes, data = generatev3(repo, None, None, True)
1941 return data
1941 return data
1942
1942
1943 available[b'v3-exp'] = generate
1943 available[b'v3-exp'] = generate
1944
1944
1945 # resolve the request
1945 # resolve the request
1946 if version == b"latest":
1946 if version == b"latest":
1947 # latest is the highest non experimental version
1947 # latest is the highest non experimental version
1948 latest_key = max(v for v in available if b'-exp' not in v)
1948 latest_key = max(v for v in available if b'-exp' not in v)
1949 return available[latest_key]
1949 return available[latest_key]
1950 elif version in available:
1950 elif version in available:
1951 return available[version]
1951 return available[version]
1952 else:
1952 else:
1953 msg = b"unkown or unavailable version: %s"
1953 msg = b"unkown or unavailable version: %s"
1954 msg %= version
1954 msg %= version
1955 hint = b"available versions: %s"
1955 hint = b"available versions: %s"
1956 hint %= b', '.join(sorted(available))
1956 hint %= b', '.join(sorted(available))
1957 raise error.Abort(msg, hint=hint)
1957 raise error.Abort(msg, hint=hint)
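# The "latest" resolution above simply picks the highest key without an "-exp"
# suffix. The same selection in isolation, with a made-up availability map:

available_versions = {b'v1': None, b'v2': None, b'v3-exp': None}
latest_key = max(v for v in available_versions if b'-exp' not in v)
assert latest_key == b'v2'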
1958
1958
1959
1959
1960 @command(
1960 @command(
1961 b'perf::stream-locked-section',
1961 b'perf::stream-locked-section',
1962 [
1962 [
1963 (
1963 (
1964 b'',
1964 b'',
1965 b'stream-version',
1965 b'stream-version',
1966 b'latest',
1966 b'latest',
1967 b'stream version to use ("v1", "v2" or "latest" (the default))',
1967 b'stream version to use ("v1", "v2" or "latest" (the default))',
1968 ),
1968 ),
1969 ]
1969 ]
1970 + formatteropts,
1970 + formatteropts,
1971 )
1971 )
1972 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
1972 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
1973 """benchmark the initial, repo-locked, section of a stream-clone"""
1973 """benchmark the initial, repo-locked, section of a stream-clone"""
1974
1974
1975 opts = _byteskwargs(opts)
1975 opts = _byteskwargs(opts)
1976 timer, fm = gettimer(ui, opts)
1976 timer, fm = gettimer(ui, opts)
1977
1977
1978 # deletion of the generator may trigger some cleanup that we do not want to
1978 # deletion of the generator may trigger some cleanup that we do not want to
1979 # measure
1979 # measure
1980 result_holder = [None]
1980 result_holder = [None]
1981
1981
1982 def setupone():
1982 def setupone():
1983 result_holder[0] = None
1983 result_holder[0] = None
1984
1984
1985 generate = _find_stream_generator(stream_version)
1985 generate = _find_stream_generator(stream_version)
1986
1986
1987 def runone():
1987 def runone():
1988 # the lock is held for the duration of the initialisation
1988 # the lock is held for the duration of the initialisation
1989 result_holder[0] = generate(repo)
1989 result_holder[0] = generate(repo)
1990
1990
1991 timer(runone, setup=setupone, title=b"load")
1991 timer(runone, setup=setupone, title=b"load")
1992 fm.end()
1992 fm.end()
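# For illustration only: possible invocations of this benchmark.
#
#   $ hg perf::stream-locked-section
#   $ hg perf::stream-locked-section --stream-version v2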
1993
1993
1994
1994
1995 @command(
1996 b'perf::stream-generate',
1997 [
1998 (
1999 b'',
2000 b'stream-version',
2001 b'latest',
2002 b'stream version to use ("v1", "v2" or "latest" (the default))',
2003 ),
2004 ]
2005 + formatteropts,
2006 )
2007 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2008 """benchmark the full generation of a stream clone"""
2009
2010 opts = _byteskwargs(opts)
2011 timer, fm = gettimer(ui, opts)
2012
2013 # deletion of the generator may trigger some cleanup that we do not want to
2014 # measure
2015
2016 generate = _find_stream_generator(stream_version)
2017
2018 def runone():
2019 # the lock is held for the duration of the initialisation
2020 for chunk in generate(repo):
2021 pass
2022
2023 timer(runone, title=b"generate")
2024 fm.end()
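# For illustration only: possible invocations of the new benchmark; "v3-exp"
# is only accepted when the running Mercurial provides a v3 generator.
#
#   $ hg perf::stream-generate
#   $ hg perf::stream-generate --stream-version v3-exp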
2025
2026
1995 @command(b'perf::parents|perfparents', formatteropts)
2027 @command(b'perf::parents|perfparents', formatteropts)
1996 def perfparents(ui, repo, **opts):
2028 def perfparents(ui, repo, **opts):
1997 """benchmark the time necessary to fetch one changeset's parents.
2029 """benchmark the time necessary to fetch one changeset's parents.
1998
2030
1999 The fetch is done using the `node identifier`, traversing all object layers
2031 The fetch is done using the `node identifier`, traversing all object layers
2000 from the repository object. The first N revisions will be used for this
2032 from the repository object. The first N revisions will be used for this
2001 benchmark. N is controlled by the ``perf.parentscount`` config option
2033 benchmark. N is controlled by the ``perf.parentscount`` config option
2002 (default: 1000).
2034 (default: 1000).
2003 """
2035 """
2004 opts = _byteskwargs(opts)
2036 opts = _byteskwargs(opts)
2005 timer, fm = gettimer(ui, opts)
2037 timer, fm = gettimer(ui, opts)
2006 # control the number of commits perfparents iterates over
2038 # control the number of commits perfparents iterates over
2007 # experimental config: perf.parentscount
2039 # experimental config: perf.parentscount
2008 count = getint(ui, b"perf", b"parentscount", 1000)
2040 count = getint(ui, b"perf", b"parentscount", 1000)
2009 if len(repo.changelog) < count:
2041 if len(repo.changelog) < count:
2010 raise error.Abort(b"repo needs %d commits for this test" % count)
2042 raise error.Abort(b"repo needs %d commits for this test" % count)
2011 repo = repo.unfiltered()
2043 repo = repo.unfiltered()
2012 nl = [repo.changelog.node(i) for i in _xrange(count)]
2044 nl = [repo.changelog.node(i) for i in _xrange(count)]
2013
2045
2014 def d():
2046 def d():
2015 for n in nl:
2047 for n in nl:
2016 repo.changelog.parents(n)
2048 repo.changelog.parents(n)
2017
2049
2018 timer(d)
2050 timer(d)
2019 fm.end()
2051 fm.end()
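# For illustration only: a possible invocation; the iteration count can be
# lowered through the experimental perf.parentscount option read above.
#
#   $ hg perf::parents --config perf.parentscount=100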
2020
2052
2021
2053
2022 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2054 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2023 def perfctxfiles(ui, repo, x, **opts):
2055 def perfctxfiles(ui, repo, x, **opts):
2024 opts = _byteskwargs(opts)
2056 opts = _byteskwargs(opts)
2025 x = int(x)
2057 x = int(x)
2026 timer, fm = gettimer(ui, opts)
2058 timer, fm = gettimer(ui, opts)
2027
2059
2028 def d():
2060 def d():
2029 len(repo[x].files())
2061 len(repo[x].files())
2030
2062
2031 timer(d)
2063 timer(d)
2032 fm.end()
2064 fm.end()
2033
2065
2034
2066
2035 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2067 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2036 def perfrawfiles(ui, repo, x, **opts):
2068 def perfrawfiles(ui, repo, x, **opts):
2037 opts = _byteskwargs(opts)
2069 opts = _byteskwargs(opts)
2038 x = int(x)
2070 x = int(x)
2039 timer, fm = gettimer(ui, opts)
2071 timer, fm = gettimer(ui, opts)
2040 cl = repo.changelog
2072 cl = repo.changelog
2041
2073
2042 def d():
2074 def d():
2043 len(cl.read(x)[3])
2075 len(cl.read(x)[3])
2044
2076
2045 timer(d)
2077 timer(d)
2046 fm.end()
2078 fm.end()
2047
2079
2048
2080
2049 @command(b'perf::lookup|perflookup', formatteropts)
2081 @command(b'perf::lookup|perflookup', formatteropts)
2050 def perflookup(ui, repo, rev, **opts):
2082 def perflookup(ui, repo, rev, **opts):
2051 opts = _byteskwargs(opts)
2083 opts = _byteskwargs(opts)
2052 timer, fm = gettimer(ui, opts)
2084 timer, fm = gettimer(ui, opts)
2053 timer(lambda: len(repo.lookup(rev)))
2085 timer(lambda: len(repo.lookup(rev)))
2054 fm.end()
2086 fm.end()
2055
2087
2056
2088
2057 @command(
2089 @command(
2058 b'perf::linelogedits|perflinelogedits',
2090 b'perf::linelogedits|perflinelogedits',
2059 [
2091 [
2060 (b'n', b'edits', 10000, b'number of edits'),
2092 (b'n', b'edits', 10000, b'number of edits'),
2061 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2093 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2062 ],
2094 ],
2063 norepo=True,
2095 norepo=True,
2064 )
2096 )
2065 def perflinelogedits(ui, **opts):
2097 def perflinelogedits(ui, **opts):
2066 from mercurial import linelog
2098 from mercurial import linelog
2067
2099
2068 opts = _byteskwargs(opts)
2100 opts = _byteskwargs(opts)
2069
2101
2070 edits = opts[b'edits']
2102 edits = opts[b'edits']
2071 maxhunklines = opts[b'max_hunk_lines']
2103 maxhunklines = opts[b'max_hunk_lines']
2072
2104
2073 maxb1 = 100000
2105 maxb1 = 100000
2074 random.seed(0)
2106 random.seed(0)
2075 randint = random.randint
2107 randint = random.randint
2076 currentlines = 0
2108 currentlines = 0
2077 arglist = []
2109 arglist = []
2078 for rev in _xrange(edits):
2110 for rev in _xrange(edits):
2079 a1 = randint(0, currentlines)
2111 a1 = randint(0, currentlines)
2080 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2112 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2081 b1 = randint(0, maxb1)
2113 b1 = randint(0, maxb1)
2082 b2 = randint(b1, b1 + maxhunklines)
2114 b2 = randint(b1, b1 + maxhunklines)
2083 currentlines += (b2 - b1) - (a2 - a1)
2115 currentlines += (b2 - b1) - (a2 - a1)
2084 arglist.append((rev, a1, a2, b1, b2))
2116 arglist.append((rev, a1, a2, b1, b2))
2085
2117
2086 def d():
2118 def d():
2087 ll = linelog.linelog()
2119 ll = linelog.linelog()
2088 for args in arglist:
2120 for args in arglist:
2089 ll.replacelines(*args)
2121 ll.replacelines(*args)
2090
2122
2091 timer, fm = gettimer(ui, opts)
2123 timer, fm = gettimer(ui, opts)
2092 timer(d)
2124 timer(d)
2093 fm.end()
2125 fm.end()
2094
2126
2095
2127
2096 @command(b'perf::revrange|perfrevrange', formatteropts)
2128 @command(b'perf::revrange|perfrevrange', formatteropts)
2097 def perfrevrange(ui, repo, *specs, **opts):
2129 def perfrevrange(ui, repo, *specs, **opts):
2098 opts = _byteskwargs(opts)
2130 opts = _byteskwargs(opts)
2099 timer, fm = gettimer(ui, opts)
2131 timer, fm = gettimer(ui, opts)
2100 revrange = scmutil.revrange
2132 revrange = scmutil.revrange
2101 timer(lambda: len(revrange(repo, specs)))
2133 timer(lambda: len(revrange(repo, specs)))
2102 fm.end()
2134 fm.end()
2103
2135
2104
2136
2105 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2137 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2106 def perfnodelookup(ui, repo, rev, **opts):
2138 def perfnodelookup(ui, repo, rev, **opts):
2107 opts = _byteskwargs(opts)
2139 opts = _byteskwargs(opts)
2108 timer, fm = gettimer(ui, opts)
2140 timer, fm = gettimer(ui, opts)
2109 import mercurial.revlog
2141 import mercurial.revlog
2110
2142
2111 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2143 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2112 n = scmutil.revsingle(repo, rev).node()
2144 n = scmutil.revsingle(repo, rev).node()
2113
2145
2114 try:
2146 try:
2115 cl = revlog(getsvfs(repo), radix=b"00changelog")
2147 cl = revlog(getsvfs(repo), radix=b"00changelog")
2116 except TypeError:
2148 except TypeError:
2117 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2149 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2118
2150
2119 def d():
2151 def d():
2120 cl.rev(n)
2152 cl.rev(n)
2121 clearcaches(cl)
2153 clearcaches(cl)
2122
2154
2123 timer(d)
2155 timer(d)
2124 fm.end()
2156 fm.end()
2125
2157
2126
2158
2127 @command(
2159 @command(
2128 b'perf::log|perflog',
2160 b'perf::log|perflog',
2129 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2161 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2130 )
2162 )
2131 def perflog(ui, repo, rev=None, **opts):
2163 def perflog(ui, repo, rev=None, **opts):
2132 opts = _byteskwargs(opts)
2164 opts = _byteskwargs(opts)
2133 if rev is None:
2165 if rev is None:
2134 rev = []
2166 rev = []
2135 timer, fm = gettimer(ui, opts)
2167 timer, fm = gettimer(ui, opts)
2136 ui.pushbuffer()
2168 ui.pushbuffer()
2137 timer(
2169 timer(
2138 lambda: commands.log(
2170 lambda: commands.log(
2139 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2171 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2140 )
2172 )
2141 )
2173 )
2142 ui.popbuffer()
2174 ui.popbuffer()
2143 fm.end()
2175 fm.end()
2144
2176
2145
2177
2146 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2178 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2147 def perfmoonwalk(ui, repo, **opts):
2179 def perfmoonwalk(ui, repo, **opts):
2148 """benchmark walking the changelog backwards
2180 """benchmark walking the changelog backwards
2149
2181
2150 This also loads the changelog data for each revision in the changelog.
2182 This also loads the changelog data for each revision in the changelog.
2151 """
2183 """
2152 opts = _byteskwargs(opts)
2184 opts = _byteskwargs(opts)
2153 timer, fm = gettimer(ui, opts)
2185 timer, fm = gettimer(ui, opts)
2154
2186
2155 def moonwalk():
2187 def moonwalk():
2156 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2188 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2157 ctx = repo[i]
2189 ctx = repo[i]
2158 ctx.branch() # read changelog data (in addition to the index)
2190 ctx.branch() # read changelog data (in addition to the index)
2159
2191
2160 timer(moonwalk)
2192 timer(moonwalk)
2161 fm.end()
2193 fm.end()
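
# Illustrative note (not part of upstream perf.py): with this extension enabled,
# the benchmark above can be run as `hg perf::moonwalk`; it walks every revision
# from tip down to 0 and reads each changelog entry, not just the index.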


@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
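
# Illustrative example (revset chosen arbitrarily): `hg perf::templating -r
# 'tip~100::tip'` renders the default template above for the selected revisions;
# a custom template can be passed as the positional argument instead.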


def _displaystats(ui, opts, entries, data):
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        nbvalues = len(data)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
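
# Illustrative note: for each metric in `entries`, the helper above prints a
# header such as "### number of missing files at head (N items)" followed by one
# "min: ..." through "max: ..." line per percentile key listed in `lines`.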


@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command finds (base, p1, p2) triplets relevant for copytracing
    benchmarking in the context of a merge.  It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed files, so let's count them.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
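
# Illustrative example: `hg perf::helper-mergecopies --timing --stats -r 'merge()'`
# prints one row per (base, p1, p2) triplet plus the percentile summary rendered
# by _displaystats above; without --timing the rename and time columns are dropped.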


@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
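
# Illustrative example: `hg perf::helper-pathcopies --timing -r '::tip'` emits one
# "source destination nb-revs nb-files nb-renames time" row per base/parent pair
# found around each merge; adding --stats also summarizes the collected values.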


@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()


@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store

    def d():
        s.fncache._load()

    timer(d)
    fm.end()


@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()


@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()

    def d():
        for p in s.fncache.entries:
            s.encode(p)

    timer(d)
    fm.end()


def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()


def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)


@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
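
# Illustrative example: `hg perf::bdiff -m 1000 --count 50 --threads 4` diffs 50
# manifest revisions starting at rev 1000 against their delta parents using the
# worker threads set up above; add `--blocks --xdiff` to exercise the xdiff path.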


@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
    # critical regression that breaks transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f,
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that concludes the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
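
# Illustrative example: `hg bundle --all pending.hg && hg perf::unbundle pending.hg`
# times bundle application only; each run re-reads the bundle and opens a fresh
# transaction in setup(), then aborts it so the repository is left unchanged.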


@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
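
# Illustrative example: `hg perf::unidiff -c 500 --count 100` times unified diffs
# of 100 changelog revisions against their delta parents; `--alldata` extends the
# measurement to the manifest and filelog texts touched by each changeset.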


@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {options[c]: b'1' for c in diffopt}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
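
# Illustrative note: `hg perf::diffwd` runs the working-directory diff once per
# diffopts combination above ('', w, b, B, wB) and reports each timing under a
# "diffopts: ..." title.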


@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3108
3140
3109
3141
3110 @command(
3142 @command(
3111 b'perf::revlogrevisions|perfrevlogrevisions',
3143 b'perf::revlogrevisions|perfrevlogrevisions',
3112 revlogopts
3144 revlogopts
3113 + formatteropts
3145 + formatteropts
3114 + [
3146 + [
3115 (b'd', b'dist', 100, b'distance between the revisions'),
3147 (b'd', b'dist', 100, b'distance between the revisions'),
3116 (b's', b'startrev', 0, b'revision to start reading at'),
3148 (b's', b'startrev', 0, b'revision to start reading at'),
3117 (b'', b'reverse', False, b'read in reverse'),
3149 (b'', b'reverse', False, b'read in reverse'),
3118 ],
3150 ],
3119 b'-c|-m|FILE',
3151 b'-c|-m|FILE',
3120 )
3152 )
3121 def perfrevlogrevisions(
3153 def perfrevlogrevisions(
3122 ui, repo, file_=None, startrev=0, reverse=False, **opts
3154 ui, repo, file_=None, startrev=0, reverse=False, **opts
3123 ):
3155 ):
3124 """Benchmark reading a series of revisions from a revlog.
3156 """Benchmark reading a series of revisions from a revlog.
3125
3157
3126 By default, we read every ``-d/--dist`` revision from 0 to tip of
3158 By default, we read every ``-d/--dist`` revision from 0 to tip of
3127 the specified revlog.
3159 the specified revlog.
3128
3160
3129 The start revision can be defined via ``-s/--startrev``.
3161 The start revision can be defined via ``-s/--startrev``.
3130 """
3162 """
3131 opts = _byteskwargs(opts)
3163 opts = _byteskwargs(opts)
3132
3164
3133 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3165 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3134 rllen = getlen(ui)(rl)
3166 rllen = getlen(ui)(rl)
3135
3167
3136 if startrev < 0:
3168 if startrev < 0:
3137 startrev = rllen + startrev
3169 startrev = rllen + startrev
3138
3170
3139 def d():
3171 def d():
3140 rl.clearcaches()
3172 rl.clearcaches()
3141
3173
3142 beginrev = startrev
3174 beginrev = startrev
3143 endrev = rllen
3175 endrev = rllen
3144 dist = opts[b'dist']
3176 dist = opts[b'dist']
3145
3177
3146 if reverse:
3178 if reverse:
3147 beginrev, endrev = endrev - 1, beginrev - 1
3179 beginrev, endrev = endrev - 1, beginrev - 1
3148 dist = -1 * dist
3180 dist = -1 * dist
3149
3181
3150 for x in _xrange(beginrev, endrev, dist):
3182 for x in _xrange(beginrev, endrev, dist):
3151 # Old Mercurial versions don't support passing an int to revision().
3183 # Old Mercurial versions don't support passing an int to revision().
3152 n = rl.node(x)
3184 n = rl.node(x)
3153 rl.revision(n)
3185 rl.revision(n)
3154
3186
3155 timer, fm = gettimer(ui, opts)
3187 timer, fm = gettimer(ui, opts)
3156 timer(d)
3188 timer(d)
3157 fm.end()
3189 fm.end()
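# Illustrative invocation (a sketch; the option values below are assumptions,
# not part of this changeset):
#
#   $ hg perf::revlogrevisions -m --dist 10 --startrev 1000
#
# reads every 10th manifest revision from revision 1000 up to tip, clearing
# the revlog caches before each timed pass.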
3158
3190
3159
3191
3160 @command(
3192 @command(
3161 b'perf::revlogwrite|perfrevlogwrite',
3193 b'perf::revlogwrite|perfrevlogwrite',
3162 revlogopts
3194 revlogopts
3163 + formatteropts
3195 + formatteropts
3164 + [
3196 + [
3165 (b's', b'startrev', 1000, b'revision to start writing at'),
3197 (b's', b'startrev', 1000, b'revision to start writing at'),
3166 (b'', b'stoprev', -1, b'last revision to write'),
3198 (b'', b'stoprev', -1, b'last revision to write'),
3167 (b'', b'count', 3, b'number of passes to perform'),
3199 (b'', b'count', 3, b'number of passes to perform'),
3168 (b'', b'details', False, b'print timing for every revisions tested'),
3200 (b'', b'details', False, b'print timing for every revisions tested'),
3169 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3201 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3170 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3202 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3171 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3203 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3172 ],
3204 ],
3173 b'-c|-m|FILE',
3205 b'-c|-m|FILE',
3174 )
3206 )
3175 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3207 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3176 """Benchmark writing a series of revisions to a revlog.
3208 """Benchmark writing a series of revisions to a revlog.
3177
3209
3178 Possible source values are:
3210 Possible source values are:
3179 * `full`: add from a full text (default).
3211 * `full`: add from a full text (default).
3180 * `parent-1`: add from a delta to the first parent
3212 * `parent-1`: add from a delta to the first parent
3181 * `parent-2`: add from a delta to the second parent if it exists
3213 * `parent-2`: add from a delta to the second parent if it exists
3182 (use a delta from the first parent otherwise)
3214 (use a delta from the first parent otherwise)
3183 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3215 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3184 * `storage`: add from the existing precomputed deltas
3216 * `storage`: add from the existing precomputed deltas
3185
3217
3186 Note: This performance command measures performance in a custom way. As a
3218 Note: This performance command measures performance in a custom way. As a
3187 result some of the global configuration of the 'perf' command does not
3219 result some of the global configuration of the 'perf' command does not
3188 apply to it:
3220 apply to it:
3189
3221
3190 * ``pre-run``: disabled
3222 * ``pre-run``: disabled
3191
3223
3192 * ``profile-benchmark``: disabled
3224 * ``profile-benchmark``: disabled
3193
3225
3194 * ``run-limits``: disabled, use --count instead
3226 * ``run-limits``: disabled, use --count instead
3195 """
3227 """
3196 opts = _byteskwargs(opts)
3228 opts = _byteskwargs(opts)
3197
3229
3198 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3230 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3199 rllen = getlen(ui)(rl)
3231 rllen = getlen(ui)(rl)
3200 if startrev < 0:
3232 if startrev < 0:
3201 startrev = rllen + startrev
3233 startrev = rllen + startrev
3202 if stoprev < 0:
3234 if stoprev < 0:
3203 stoprev = rllen + stoprev
3235 stoprev = rllen + stoprev
3204
3236
3205 lazydeltabase = opts['lazydeltabase']
3237 lazydeltabase = opts['lazydeltabase']
3206 source = opts['source']
3238 source = opts['source']
3207 clearcaches = opts['clear_caches']
3239 clearcaches = opts['clear_caches']
3208 validsource = (
3240 validsource = (
3209 b'full',
3241 b'full',
3210 b'parent-1',
3242 b'parent-1',
3211 b'parent-2',
3243 b'parent-2',
3212 b'parent-smallest',
3244 b'parent-smallest',
3213 b'storage',
3245 b'storage',
3214 )
3246 )
3215 if source not in validsource:
3247 if source not in validsource:
3216 raise error.Abort('invalid source type: %s' % source)
3248 raise error.Abort('invalid source type: %s' % source)
3217
3249
3218 ### actually gather results
3250 ### actually gather results
3219 count = opts['count']
3251 count = opts['count']
3220 if count <= 0:
3252 if count <= 0:
3221 raise error.Abort('invalid run count: %d' % count)
3253 raise error.Abort('invalid run count: %d' % count)
3222 allresults = []
3254 allresults = []
3223 for c in range(count):
3255 for c in range(count):
3224 timing = _timeonewrite(
3256 timing = _timeonewrite(
3225 ui,
3257 ui,
3226 rl,
3258 rl,
3227 source,
3259 source,
3228 startrev,
3260 startrev,
3229 stoprev,
3261 stoprev,
3230 c + 1,
3262 c + 1,
3231 lazydeltabase=lazydeltabase,
3263 lazydeltabase=lazydeltabase,
3232 clearcaches=clearcaches,
3264 clearcaches=clearcaches,
3233 )
3265 )
3234 allresults.append(timing)
3266 allresults.append(timing)
3235
3267
3236 ### consolidate the results in a single list
3268 ### consolidate the results in a single list
3237 results = []
3269 results = []
3238 for idx, (rev, t) in enumerate(allresults[0]):
3270 for idx, (rev, t) in enumerate(allresults[0]):
3239 ts = [t]
3271 ts = [t]
3240 for other in allresults[1:]:
3272 for other in allresults[1:]:
3241 orev, ot = other[idx]
3273 orev, ot = other[idx]
3242 assert orev == rev
3274 assert orev == rev
3243 ts.append(ot)
3275 ts.append(ot)
3244 results.append((rev, ts))
3276 results.append((rev, ts))
3245 resultcount = len(results)
3277 resultcount = len(results)
3246
3278
3247 ### Compute and display relevant statistics
3279 ### Compute and display relevant statistics
3248
3280
3249 # get a formatter
3281 # get a formatter
3250 fm = ui.formatter(b'perf', opts)
3282 fm = ui.formatter(b'perf', opts)
3251 displayall = ui.configbool(b"perf", b"all-timing", False)
3283 displayall = ui.configbool(b"perf", b"all-timing", False)
3252
3284
3253 # print individual details if requested
3285 # print individual details if requested
3254 if opts['details']:
3286 if opts['details']:
3255 for idx, item in enumerate(results, 1):
3287 for idx, item in enumerate(results, 1):
3256 rev, data = item
3288 rev, data = item
3257 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3289 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3258 formatone(fm, data, title=title, displayall=displayall)
3290 formatone(fm, data, title=title, displayall=displayall)
3259
3291
3260 # sorts results by median time
3292 # sorts results by median time
3261 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3293 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3262 # list of (name, index) to display
3294 # list of (name, index) to display
3263 relevants = [
3295 relevants = [
3264 ("min", 0),
3296 ("min", 0),
3265 ("10%", resultcount * 10 // 100),
3297 ("10%", resultcount * 10 // 100),
3266 ("25%", resultcount * 25 // 100),
3298 ("25%", resultcount * 25 // 100),
3267 ("50%", resultcount * 70 // 100),
3299 ("50%", resultcount * 70 // 100),
3268 ("75%", resultcount * 75 // 100),
3300 ("75%", resultcount * 75 // 100),
3269 ("90%", resultcount * 90 // 100),
3301 ("90%", resultcount * 90 // 100),
3270 ("95%", resultcount * 95 // 100),
3302 ("95%", resultcount * 95 // 100),
3271 ("99%", resultcount * 99 // 100),
3303 ("99%", resultcount * 99 // 100),
3272 ("99.9%", resultcount * 999 // 1000),
3304 ("99.9%", resultcount * 999 // 1000),
3273 ("99.99%", resultcount * 9999 // 10000),
3305 ("99.99%", resultcount * 9999 // 10000),
3274 ("99.999%", resultcount * 99999 // 100000),
3306 ("99.999%", resultcount * 99999 // 100000),
3275 ("max", -1),
3307 ("max", -1),
3276 ]
3308 ]
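# Illustrative arithmetic (the count is an assumption): with resultcount ==
# 200, the entries above index the sorted `results` list as
#   results[200 * 10 // 100]      # -> results[20], the 10th percentile
#   results[200 * 50 // 100]      # -> results[100], the median
#   results[200 * 9999 // 10000]  # -> results[199], effectively the maximum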
3277 if not ui.quiet:
3309 if not ui.quiet:
3278 for name, idx in relevants:
3310 for name, idx in relevants:
3279 data = results[idx]
3311 data = results[idx]
3280 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3312 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3281 formatone(fm, data[1], title=title, displayall=displayall)
3313 formatone(fm, data[1], title=title, displayall=displayall)
3282
3314
3283 # XXX summing that many floats will not be very precise, we ignore this fact
3315 # XXX summing that many floats will not be very precise, we ignore this fact
3284 # for now
3316 # for now
3285 totaltime = []
3317 totaltime = []
3286 for item in allresults:
3318 for item in allresults:
3287 totaltime.append(
3319 totaltime.append(
3288 (
3320 (
3289 sum(x[1][0] for x in item),
3321 sum(x[1][0] for x in item),
3290 sum(x[1][1] for x in item),
3322 sum(x[1][1] for x in item),
3291 sum(x[1][2] for x in item),
3323 sum(x[1][2] for x in item),
3292 )
3324 )
3293 )
3325 )
3294 formatone(
3326 formatone(
3295 fm,
3327 fm,
3296 totaltime,
3328 totaltime,
3297 title="total time (%d revs)" % resultcount,
3329 title="total time (%d revs)" % resultcount,
3298 displayall=displayall,
3330 displayall=displayall,
3299 )
3331 )
3300 fm.end()
3332 fm.end()
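# Illustrative invocation (option values are assumptions): rebuild the last
# 500 manifest revisions from their smallest parent delta, in five passes,
# printing per-revision timings:
#
#   $ hg perf::revlogwrite -m --startrev -500 --source parent-smallest \
#       --count 5 --details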
3301
3333
3302
3334
3303 class _faketr:
3335 class _faketr:
3304 def add(s, x, y, z=None):
3336 def add(s, x, y, z=None):
3305 return None
3337 return None
3306
3338
3307
3339
3308 def _timeonewrite(
3340 def _timeonewrite(
3309 ui,
3341 ui,
3310 orig,
3342 orig,
3311 source,
3343 source,
3312 startrev,
3344 startrev,
3313 stoprev,
3345 stoprev,
3314 runidx=None,
3346 runidx=None,
3315 lazydeltabase=True,
3347 lazydeltabase=True,
3316 clearcaches=True,
3348 clearcaches=True,
3317 ):
3349 ):
3318 timings = []
3350 timings = []
3319 tr = _faketr()
3351 tr = _faketr()
3320 with _temprevlog(ui, orig, startrev) as dest:
3352 with _temprevlog(ui, orig, startrev) as dest:
3321 dest._lazydeltabase = lazydeltabase
3353 dest._lazydeltabase = lazydeltabase
3322 revs = list(orig.revs(startrev, stoprev))
3354 revs = list(orig.revs(startrev, stoprev))
3323 total = len(revs)
3355 total = len(revs)
3324 topic = 'adding'
3356 topic = 'adding'
3325 if runidx is not None:
3357 if runidx is not None:
3326 topic += ' (run #%d)' % runidx
3358 topic += ' (run #%d)' % runidx
3327 # Support both old and new progress API
3359 # Support both old and new progress API
3328 if util.safehasattr(ui, 'makeprogress'):
3360 if util.safehasattr(ui, 'makeprogress'):
3329 progress = ui.makeprogress(topic, unit='revs', total=total)
3361 progress = ui.makeprogress(topic, unit='revs', total=total)
3330
3362
3331 def updateprogress(pos):
3363 def updateprogress(pos):
3332 progress.update(pos)
3364 progress.update(pos)
3333
3365
3334 def completeprogress():
3366 def completeprogress():
3335 progress.complete()
3367 progress.complete()
3336
3368
3337 else:
3369 else:
3338
3370
3339 def updateprogress(pos):
3371 def updateprogress(pos):
3340 ui.progress(topic, pos, unit='revs', total=total)
3372 ui.progress(topic, pos, unit='revs', total=total)
3341
3373
3342 def completeprogress():
3374 def completeprogress():
3343 ui.progress(topic, None, unit='revs', total=total)
3375 ui.progress(topic, None, unit='revs', total=total)
3344
3376
3345 for idx, rev in enumerate(revs):
3377 for idx, rev in enumerate(revs):
3346 updateprogress(idx)
3378 updateprogress(idx)
3347 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3379 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3348 if clearcaches:
3380 if clearcaches:
3349 dest.index.clearcaches()
3381 dest.index.clearcaches()
3350 dest.clearcaches()
3382 dest.clearcaches()
3351 with timeone() as r:
3383 with timeone() as r:
3352 dest.addrawrevision(*addargs, **addkwargs)
3384 dest.addrawrevision(*addargs, **addkwargs)
3353 timings.append((rev, r[0]))
3385 timings.append((rev, r[0]))
3354 updateprogress(total)
3386 updateprogress(total)
3355 completeprogress()
3387 completeprogress()
3356 return timings
3388 return timings
3357
3389
3358
3390
3359 def _getrevisionseed(orig, rev, tr, source):
3391 def _getrevisionseed(orig, rev, tr, source):
3360 from mercurial.node import nullid
3392 from mercurial.node import nullid
3361
3393
3362 linkrev = orig.linkrev(rev)
3394 linkrev = orig.linkrev(rev)
3363 node = orig.node(rev)
3395 node = orig.node(rev)
3364 p1, p2 = orig.parents(node)
3396 p1, p2 = orig.parents(node)
3365 flags = orig.flags(rev)
3397 flags = orig.flags(rev)
3366 cachedelta = None
3398 cachedelta = None
3367 text = None
3399 text = None
3368
3400
3369 if source == b'full':
3401 if source == b'full':
3370 text = orig.revision(rev)
3402 text = orig.revision(rev)
3371 elif source == b'parent-1':
3403 elif source == b'parent-1':
3372 baserev = orig.rev(p1)
3404 baserev = orig.rev(p1)
3373 cachedelta = (baserev, orig.revdiff(p1, rev))
3405 cachedelta = (baserev, orig.revdiff(p1, rev))
3374 elif source == b'parent-2':
3406 elif source == b'parent-2':
3375 parent = p2
3407 parent = p2
3376 if p2 == nullid:
3408 if p2 == nullid:
3377 parent = p1
3409 parent = p1
3378 baserev = orig.rev(parent)
3410 baserev = orig.rev(parent)
3379 cachedelta = (baserev, orig.revdiff(parent, rev))
3411 cachedelta = (baserev, orig.revdiff(parent, rev))
3380 elif source == b'parent-smallest':
3412 elif source == b'parent-smallest':
3381 p1diff = orig.revdiff(p1, rev)
3413 p1diff = orig.revdiff(p1, rev)
3382 parent = p1
3414 parent = p1
3383 diff = p1diff
3415 diff = p1diff
3384 if p2 != nullid:
3416 if p2 != nullid:
3385 p2diff = orig.revdiff(p2, rev)
3417 p2diff = orig.revdiff(p2, rev)
3386 if len(p1diff) > len(p2diff):
3418 if len(p1diff) > len(p2diff):
3387 parent = p2
3419 parent = p2
3388 diff = p2diff
3420 diff = p2diff
3389 baserev = orig.rev(parent)
3421 baserev = orig.rev(parent)
3390 cachedelta = (baserev, diff)
3422 cachedelta = (baserev, diff)
3391 elif source == b'storage':
3423 elif source == b'storage':
3392 baserev = orig.deltaparent(rev)
3424 baserev = orig.deltaparent(rev)
3393 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3425 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3394
3426
3395 return (
3427 return (
3396 (text, tr, linkrev, p1, p2),
3428 (text, tr, linkrev, p1, p2),
3397 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3429 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3398 )
3430 )
3399
3431
3400
3432
3401 @contextlib.contextmanager
3433 @contextlib.contextmanager
3402 def _temprevlog(ui, orig, truncaterev):
3434 def _temprevlog(ui, orig, truncaterev):
3403 from mercurial import vfs as vfsmod
3435 from mercurial import vfs as vfsmod
3404
3436
3405 if orig._inline:
3437 if orig._inline:
3406 raise error.Abort('not supporting inline revlog (yet)')
3438 raise error.Abort('not supporting inline revlog (yet)')
3407 revlogkwargs = {}
3439 revlogkwargs = {}
3408 k = 'upperboundcomp'
3440 k = 'upperboundcomp'
3409 if util.safehasattr(orig, k):
3441 if util.safehasattr(orig, k):
3410 revlogkwargs[k] = getattr(orig, k)
3442 revlogkwargs[k] = getattr(orig, k)
3411
3443
3412 indexfile = getattr(orig, '_indexfile', None)
3444 indexfile = getattr(orig, '_indexfile', None)
3413 if indexfile is None:
3445 if indexfile is None:
3414 # compatibility with <= hg-5.8
3446 # compatibility with <= hg-5.8
3415 indexfile = getattr(orig, 'indexfile')
3447 indexfile = getattr(orig, 'indexfile')
3416 origindexpath = orig.opener.join(indexfile)
3448 origindexpath = orig.opener.join(indexfile)
3417
3449
3418 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3450 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3419 origdatapath = orig.opener.join(datafile)
3451 origdatapath = orig.opener.join(datafile)
3420 radix = b'revlog'
3452 radix = b'revlog'
3421 indexname = b'revlog.i'
3453 indexname = b'revlog.i'
3422 dataname = b'revlog.d'
3454 dataname = b'revlog.d'
3423
3455
3424 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3456 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3425 try:
3457 try:
3426 # copy the data file in a temporary directory
3458 # copy the data file in a temporary directory
3427 ui.debug('copying data in %s\n' % tmpdir)
3459 ui.debug('copying data in %s\n' % tmpdir)
3428 destindexpath = os.path.join(tmpdir, 'revlog.i')
3460 destindexpath = os.path.join(tmpdir, 'revlog.i')
3429 destdatapath = os.path.join(tmpdir, 'revlog.d')
3461 destdatapath = os.path.join(tmpdir, 'revlog.d')
3430 shutil.copyfile(origindexpath, destindexpath)
3462 shutil.copyfile(origindexpath, destindexpath)
3431 shutil.copyfile(origdatapath, destdatapath)
3463 shutil.copyfile(origdatapath, destdatapath)
3432
3464
3433 # remove the data we want to add again
3465 # remove the data we want to add again
3434 ui.debug('truncating data to be rewritten\n')
3466 ui.debug('truncating data to be rewritten\n')
3435 with open(destindexpath, 'ab') as index:
3467 with open(destindexpath, 'ab') as index:
3436 index.seek(0)
3468 index.seek(0)
3437 index.truncate(truncaterev * orig._io.size)
3469 index.truncate(truncaterev * orig._io.size)
3438 with open(destdatapath, 'ab') as data:
3470 with open(destdatapath, 'ab') as data:
3439 data.seek(0)
3471 data.seek(0)
3440 data.truncate(orig.start(truncaterev))
3472 data.truncate(orig.start(truncaterev))
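# Illustrative arithmetic (assuming a revlogv1 index, whose entries are 64
# bytes): with truncaterev == 1000 the index copy above is cut to
# 1000 * 64 == 64000 bytes and the data copy to orig.start(1000), the byte
# offset where revision 1000's data begins, so both files end just before
# the revisions that will be re-added.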
3441
3473
3442 # instantiate a new revlog from the temporary copy
3474 # instantiate a new revlog from the temporary copy
3443 ui.debug('instantiating revlog from the truncated copy\n')
3475 ui.debug('instantiating revlog from the truncated copy\n')
3444 vfs = vfsmod.vfs(tmpdir)
3476 vfs = vfsmod.vfs(tmpdir)
3445 vfs.options = getattr(orig.opener, 'options', None)
3477 vfs.options = getattr(orig.opener, 'options', None)
3446
3478
3447 try:
3479 try:
3448 dest = revlog(vfs, radix=radix, **revlogkwargs)
3480 dest = revlog(vfs, radix=radix, **revlogkwargs)
3449 except TypeError:
3481 except TypeError:
3450 dest = revlog(
3482 dest = revlog(
3451 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3483 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3452 )
3484 )
3453 if dest._inline:
3485 if dest._inline:
3454 raise error.Abort('not supporting inline revlog (yet)')
3486 raise error.Abort('not supporting inline revlog (yet)')
3455 # make sure internals are initialized
3487 # make sure internals are initialized
3456 dest.revision(len(dest) - 1)
3488 dest.revision(len(dest) - 1)
3457 yield dest
3489 yield dest
3458 del dest, vfs
3490 del dest, vfs
3459 finally:
3491 finally:
3460 shutil.rmtree(tmpdir, True)
3492 shutil.rmtree(tmpdir, True)
3461
3493
3462
3494
3463 @command(
3495 @command(
3464 b'perf::revlogchunks|perfrevlogchunks',
3496 b'perf::revlogchunks|perfrevlogchunks',
3465 revlogopts
3497 revlogopts
3466 + formatteropts
3498 + formatteropts
3467 + [
3499 + [
3468 (b'e', b'engines', b'', b'compression engines to use'),
3500 (b'e', b'engines', b'', b'compression engines to use'),
3469 (b's', b'startrev', 0, b'revision to start at'),
3501 (b's', b'startrev', 0, b'revision to start at'),
3470 ],
3502 ],
3471 b'-c|-m|FILE',
3503 b'-c|-m|FILE',
3472 )
3504 )
3473 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3505 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3474 """Benchmark operations on revlog chunks.
3506 """Benchmark operations on revlog chunks.
3475
3507
3476 Logically, each revlog is a collection of fulltext revisions. However,
3508 Logically, each revlog is a collection of fulltext revisions. However,
3477 stored within each revlog are "chunks" of possibly compressed data. This
3509 stored within each revlog are "chunks" of possibly compressed data. This
3478 data needs to be read and decompressed or compressed and written.
3510 data needs to be read and decompressed or compressed and written.
3479
3511
3480 This command measures the time it takes to read+decompress and recompress
3512 This command measures the time it takes to read+decompress and recompress
3481 chunks in a revlog. It effectively isolates I/O and compression performance.
3513 chunks in a revlog. It effectively isolates I/O and compression performance.
3482 For measurements of higher-level operations like resolving revisions,
3514 For measurements of higher-level operations like resolving revisions,
3483 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3515 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3484 """
3516 """
3485 opts = _byteskwargs(opts)
3517 opts = _byteskwargs(opts)
3486
3518
3487 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3519 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3488
3520
3489 # _chunkraw was renamed to _getsegmentforrevs.
3521 # _chunkraw was renamed to _getsegmentforrevs.
3490 try:
3522 try:
3491 segmentforrevs = rl._getsegmentforrevs
3523 segmentforrevs = rl._getsegmentforrevs
3492 except AttributeError:
3524 except AttributeError:
3493 segmentforrevs = rl._chunkraw
3525 segmentforrevs = rl._chunkraw
3494
3526
3495 # Verify engines argument.
3527 # Verify engines argument.
3496 if engines:
3528 if engines:
3497 engines = {e.strip() for e in engines.split(b',')}
3529 engines = {e.strip() for e in engines.split(b',')}
3498 for engine in engines:
3530 for engine in engines:
3499 try:
3531 try:
3500 util.compressionengines[engine]
3532 util.compressionengines[engine]
3501 except KeyError:
3533 except KeyError:
3502 raise error.Abort(b'unknown compression engine: %s' % engine)
3534 raise error.Abort(b'unknown compression engine: %s' % engine)
3503 else:
3535 else:
3504 engines = []
3536 engines = []
3505 for e in util.compengines:
3537 for e in util.compengines:
3506 engine = util.compengines[e]
3538 engine = util.compengines[e]
3507 try:
3539 try:
3508 if engine.available():
3540 if engine.available():
3509 engine.revlogcompressor().compress(b'dummy')
3541 engine.revlogcompressor().compress(b'dummy')
3510 engines.append(e)
3542 engines.append(e)
3511 except NotImplementedError:
3543 except NotImplementedError:
3512 pass
3544 pass
3513
3545
3514 revs = list(rl.revs(startrev, len(rl) - 1))
3546 revs = list(rl.revs(startrev, len(rl) - 1))
3515
3547
3516 def rlfh(rl):
3548 def rlfh(rl):
3517 if rl._inline:
3549 if rl._inline:
3518 indexfile = getattr(rl, '_indexfile', None)
3550 indexfile = getattr(rl, '_indexfile', None)
3519 if indexfile is None:
3551 if indexfile is None:
3520 # compatibility with <= hg-5.8
3552 # compatibility with <= hg-5.8
3521 indexfile = getattr(rl, 'indexfile')
3553 indexfile = getattr(rl, 'indexfile')
3522 return getsvfs(repo)(indexfile)
3554 return getsvfs(repo)(indexfile)
3523 else:
3555 else:
3524 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3556 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3525 return getsvfs(repo)(datafile)
3557 return getsvfs(repo)(datafile)
3526
3558
3527 def doread():
3559 def doread():
3528 rl.clearcaches()
3560 rl.clearcaches()
3529 for rev in revs:
3561 for rev in revs:
3530 segmentforrevs(rev, rev)
3562 segmentforrevs(rev, rev)
3531
3563
3532 def doreadcachedfh():
3564 def doreadcachedfh():
3533 rl.clearcaches()
3565 rl.clearcaches()
3534 fh = rlfh(rl)
3566 fh = rlfh(rl)
3535 for rev in revs:
3567 for rev in revs:
3536 segmentforrevs(rev, rev, df=fh)
3568 segmentforrevs(rev, rev, df=fh)
3537
3569
3538 def doreadbatch():
3570 def doreadbatch():
3539 rl.clearcaches()
3571 rl.clearcaches()
3540 segmentforrevs(revs[0], revs[-1])
3572 segmentforrevs(revs[0], revs[-1])
3541
3573
3542 def doreadbatchcachedfh():
3574 def doreadbatchcachedfh():
3543 rl.clearcaches()
3575 rl.clearcaches()
3544 fh = rlfh(rl)
3576 fh = rlfh(rl)
3545 segmentforrevs(revs[0], revs[-1], df=fh)
3577 segmentforrevs(revs[0], revs[-1], df=fh)
3546
3578
3547 def dochunk():
3579 def dochunk():
3548 rl.clearcaches()
3580 rl.clearcaches()
3549 fh = rlfh(rl)
3581 fh = rlfh(rl)
3550 for rev in revs:
3582 for rev in revs:
3551 rl._chunk(rev, df=fh)
3583 rl._chunk(rev, df=fh)
3552
3584
3553 chunks = [None]
3585 chunks = [None]
3554
3586
3555 def dochunkbatch():
3587 def dochunkbatch():
3556 rl.clearcaches()
3588 rl.clearcaches()
3557 fh = rlfh(rl)
3589 fh = rlfh(rl)
3558 # Save chunks as a side-effect.
3590 # Save chunks as a side-effect.
3559 chunks[0] = rl._chunks(revs, df=fh)
3591 chunks[0] = rl._chunks(revs, df=fh)
3560
3592
3561 def docompress(compressor):
3593 def docompress(compressor):
3562 rl.clearcaches()
3594 rl.clearcaches()
3563
3595
3564 try:
3596 try:
3565 # Swap in the requested compression engine.
3597 # Swap in the requested compression engine.
3566 oldcompressor = rl._compressor
3598 oldcompressor = rl._compressor
3567 rl._compressor = compressor
3599 rl._compressor = compressor
3568 for chunk in chunks[0]:
3600 for chunk in chunks[0]:
3569 rl.compress(chunk)
3601 rl.compress(chunk)
3570 finally:
3602 finally:
3571 rl._compressor = oldcompressor
3603 rl._compressor = oldcompressor
3572
3604
3573 benches = [
3605 benches = [
3574 (lambda: doread(), b'read'),
3606 (lambda: doread(), b'read'),
3575 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3607 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3576 (lambda: doreadbatch(), b'read batch'),
3608 (lambda: doreadbatch(), b'read batch'),
3577 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3609 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3578 (lambda: dochunk(), b'chunk'),
3610 (lambda: dochunk(), b'chunk'),
3579 (lambda: dochunkbatch(), b'chunk batch'),
3611 (lambda: dochunkbatch(), b'chunk batch'),
3580 ]
3612 ]
3581
3613
3582 for engine in sorted(engines):
3614 for engine in sorted(engines):
3583 compressor = util.compengines[engine].revlogcompressor()
3615 compressor = util.compengines[engine].revlogcompressor()
3584 benches.append(
3616 benches.append(
3585 (
3617 (
3586 functools.partial(docompress, compressor),
3618 functools.partial(docompress, compressor),
3587 b'compress w/ %s' % engine,
3619 b'compress w/ %s' % engine,
3588 )
3620 )
3589 )
3621 )
3590
3622
3591 for fn, title in benches:
3623 for fn, title in benches:
3592 timer, fm = gettimer(ui, opts)
3624 timer, fm = gettimer(ui, opts)
3593 timer(fn, title=title)
3625 timer(fn, title=title)
3594 fm.end()
3626 fm.end()
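# Illustrative invocation (which engines are available depends on the local
# build, so the value below is an assumption):
#
#   $ hg perf::revlogchunks -c --startrev 10000 --engines zlib
#
# times reading and decompressing changelog chunks from revision 10000 on,
# then recompressing them with the zlib engine only.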
3595
3627
3596
3628
3597 @command(
3629 @command(
3598 b'perf::revlogrevision|perfrevlogrevision',
3630 b'perf::revlogrevision|perfrevlogrevision',
3599 revlogopts
3631 revlogopts
3600 + formatteropts
3632 + formatteropts
3601 + [(b'', b'cache', False, b'use caches instead of clearing')],
3633 + [(b'', b'cache', False, b'use caches instead of clearing')],
3602 b'-c|-m|FILE REV',
3634 b'-c|-m|FILE REV',
3603 )
3635 )
3604 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3636 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3605 """Benchmark obtaining a revlog revision.
3637 """Benchmark obtaining a revlog revision.
3606
3638
3607 Obtaining a revlog revision consists of roughly the following steps:
3639 Obtaining a revlog revision consists of roughly the following steps:
3608
3640
3609 1. Compute the delta chain
3641 1. Compute the delta chain
3610 2. Slice the delta chain if applicable
3642 2. Slice the delta chain if applicable
3611 3. Obtain the raw chunks for that delta chain
3643 3. Obtain the raw chunks for that delta chain
3612 4. Decompress each raw chunk
3644 4. Decompress each raw chunk
3613 5. Apply binary patches to obtain fulltext
3645 5. Apply binary patches to obtain fulltext
3614 6. Verify hash of fulltext
3646 6. Verify hash of fulltext
3615
3647
3616 This command measures the time spent in each of these phases.
3648 This command measures the time spent in each of these phases.
3617 """
3649 """
3618 opts = _byteskwargs(opts)
3650 opts = _byteskwargs(opts)
3619
3651
3620 if opts.get(b'changelog') or opts.get(b'manifest'):
3652 if opts.get(b'changelog') or opts.get(b'manifest'):
3621 file_, rev = None, file_
3653 file_, rev = None, file_
3622 elif rev is None:
3654 elif rev is None:
3623 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3655 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3624
3656
3625 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3657 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3626
3658
3627 # _chunkraw was renamed to _getsegmentforrevs.
3659 # _chunkraw was renamed to _getsegmentforrevs.
3628 try:
3660 try:
3629 segmentforrevs = r._getsegmentforrevs
3661 segmentforrevs = r._getsegmentforrevs
3630 except AttributeError:
3662 except AttributeError:
3631 segmentforrevs = r._chunkraw
3663 segmentforrevs = r._chunkraw
3632
3664
3633 node = r.lookup(rev)
3665 node = r.lookup(rev)
3634 rev = r.rev(node)
3666 rev = r.rev(node)
3635
3667
3636 def getrawchunks(data, chain):
3668 def getrawchunks(data, chain):
3637 start = r.start
3669 start = r.start
3638 length = r.length
3670 length = r.length
3639 inline = r._inline
3671 inline = r._inline
3640 try:
3672 try:
3641 iosize = r.index.entry_size
3673 iosize = r.index.entry_size
3642 except AttributeError:
3674 except AttributeError:
3643 iosize = r._io.size
3675 iosize = r._io.size
3644 buffer = util.buffer
3676 buffer = util.buffer
3645
3677
3646 chunks = []
3678 chunks = []
3647 ladd = chunks.append
3679 ladd = chunks.append
3648 for idx, item in enumerate(chain):
3680 for idx, item in enumerate(chain):
3649 offset = start(item[0])
3681 offset = start(item[0])
3650 bits = data[idx]
3682 bits = data[idx]
3651 for rev in item:
3683 for rev in item:
3652 chunkstart = start(rev)
3684 chunkstart = start(rev)
3653 if inline:
3685 if inline:
3654 chunkstart += (rev + 1) * iosize
3686 chunkstart += (rev + 1) * iosize
3655 chunklength = length(rev)
3687 chunklength = length(rev)
3656 ladd(buffer(bits, chunkstart - offset, chunklength))
3688 ladd(buffer(bits, chunkstart - offset, chunklength))
3657
3689
3658 return chunks
3690 return chunks
3659
3691
3660 def dodeltachain(rev):
3692 def dodeltachain(rev):
3661 if not cache:
3693 if not cache:
3662 r.clearcaches()
3694 r.clearcaches()
3663 r._deltachain(rev)
3695 r._deltachain(rev)
3664
3696
3665 def doread(chain):
3697 def doread(chain):
3666 if not cache:
3698 if not cache:
3667 r.clearcaches()
3699 r.clearcaches()
3668 for item in slicedchain:
3700 for item in slicedchain:
3669 segmentforrevs(item[0], item[-1])
3701 segmentforrevs(item[0], item[-1])
3670
3702
3671 def doslice(r, chain, size):
3703 def doslice(r, chain, size):
3672 for s in slicechunk(r, chain, targetsize=size):
3704 for s in slicechunk(r, chain, targetsize=size):
3673 pass
3705 pass
3674
3706
3675 def dorawchunks(data, chain):
3707 def dorawchunks(data, chain):
3676 if not cache:
3708 if not cache:
3677 r.clearcaches()
3709 r.clearcaches()
3678 getrawchunks(data, chain)
3710 getrawchunks(data, chain)
3679
3711
3680 def dodecompress(chunks):
3712 def dodecompress(chunks):
3681 decomp = r.decompress
3713 decomp = r.decompress
3682 for chunk in chunks:
3714 for chunk in chunks:
3683 decomp(chunk)
3715 decomp(chunk)
3684
3716
3685 def dopatch(text, bins):
3717 def dopatch(text, bins):
3686 if not cache:
3718 if not cache:
3687 r.clearcaches()
3719 r.clearcaches()
3688 mdiff.patches(text, bins)
3720 mdiff.patches(text, bins)
3689
3721
3690 def dohash(text):
3722 def dohash(text):
3691 if not cache:
3723 if not cache:
3692 r.clearcaches()
3724 r.clearcaches()
3693 r.checkhash(text, node, rev=rev)
3725 r.checkhash(text, node, rev=rev)
3694
3726
3695 def dorevision():
3727 def dorevision():
3696 if not cache:
3728 if not cache:
3697 r.clearcaches()
3729 r.clearcaches()
3698 r.revision(node)
3730 r.revision(node)
3699
3731
3700 try:
3732 try:
3701 from mercurial.revlogutils.deltas import slicechunk
3733 from mercurial.revlogutils.deltas import slicechunk
3702 except ImportError:
3734 except ImportError:
3703 slicechunk = getattr(revlog, '_slicechunk', None)
3735 slicechunk = getattr(revlog, '_slicechunk', None)
3704
3736
3705 size = r.length(rev)
3737 size = r.length(rev)
3706 chain = r._deltachain(rev)[0]
3738 chain = r._deltachain(rev)[0]
3707 if not getattr(r, '_withsparseread', False):
3739 if not getattr(r, '_withsparseread', False):
3708 slicedchain = (chain,)
3740 slicedchain = (chain,)
3709 else:
3741 else:
3710 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3742 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3711 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3743 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3712 rawchunks = getrawchunks(data, slicedchain)
3744 rawchunks = getrawchunks(data, slicedchain)
3713 bins = r._chunks(chain)
3745 bins = r._chunks(chain)
3714 text = bytes(bins[0])
3746 text = bytes(bins[0])
3715 bins = bins[1:]
3747 bins = bins[1:]
3716 text = mdiff.patches(text, bins)
3748 text = mdiff.patches(text, bins)
3717
3749
3718 benches = [
3750 benches = [
3719 (lambda: dorevision(), b'full'),
3751 (lambda: dorevision(), b'full'),
3720 (lambda: dodeltachain(rev), b'deltachain'),
3752 (lambda: dodeltachain(rev), b'deltachain'),
3721 (lambda: doread(chain), b'read'),
3753 (lambda: doread(chain), b'read'),
3722 ]
3754 ]
3723
3755
3724 if getattr(r, '_withsparseread', False):
3756 if getattr(r, '_withsparseread', False):
3725 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3757 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3726 benches.append(slicing)
3758 benches.append(slicing)
3727
3759
3728 benches.extend(
3760 benches.extend(
3729 [
3761 [
3730 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3762 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3731 (lambda: dodecompress(rawchunks), b'decompress'),
3763 (lambda: dodecompress(rawchunks), b'decompress'),
3732 (lambda: dopatch(text, bins), b'patch'),
3764 (lambda: dopatch(text, bins), b'patch'),
3733 (lambda: dohash(text), b'hash'),
3765 (lambda: dohash(text), b'hash'),
3734 ]
3766 ]
3735 )
3767 )
3736
3768
3737 timer, fm = gettimer(ui, opts)
3769 timer, fm = gettimer(ui, opts)
3738 for fn, title in benches:
3770 for fn, title in benches:
3739 timer(fn, title=title)
3771 timer(fn, title=title)
3740 fm.end()
3772 fm.end()
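# Illustrative invocation (the revision number is an assumption):
#
#   $ hg perf::revlogrevision -m 25000
#
# reports the time spent in each phase (deltachain, read, rawchunks,
# decompress, patch, hash) of restoring manifest revision 25000; add
# --cache to measure with warm caches instead.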
3741
3773
3742
3774
3743 @command(
3775 @command(
3744 b'perf::revset|perfrevset',
3776 b'perf::revset|perfrevset',
3745 [
3777 [
3746 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3778 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3747 (b'', b'contexts', False, b'obtain changectx for each revision'),
3779 (b'', b'contexts', False, b'obtain changectx for each revision'),
3748 ]
3780 ]
3749 + formatteropts,
3781 + formatteropts,
3750 b"REVSET",
3782 b"REVSET",
3751 )
3783 )
3752 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3784 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3753 """benchmark the execution time of a revset
3785 """benchmark the execution time of a revset
3754
3786
3755 Use the --clear option if you need to evaluate the impact of building the
3787 Use the --clear option if you need to evaluate the impact of building the
3756 volatile revision set caches on the revset execution. Volatile caches hold
3788 volatile revision set caches on the revset execution. Volatile caches hold
3757 the filtering and obsolescence related data."""
3789 the filtering and obsolescence related data."""
3758 opts = _byteskwargs(opts)
3790 opts = _byteskwargs(opts)
3759
3791
3760 timer, fm = gettimer(ui, opts)
3792 timer, fm = gettimer(ui, opts)
3761
3793
3762 def d():
3794 def d():
3763 if clear:
3795 if clear:
3764 repo.invalidatevolatilesets()
3796 repo.invalidatevolatilesets()
3765 if contexts:
3797 if contexts:
3766 for ctx in repo.set(expr):
3798 for ctx in repo.set(expr):
3767 pass
3799 pass
3768 else:
3800 else:
3769 for r in repo.revs(expr):
3801 for r in repo.revs(expr):
3770 pass
3802 pass
3771
3803
3772 timer(d)
3804 timer(d)
3773 fm.end()
3805 fm.end()
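# Illustrative invocation (the revset is an assumption):
#
#   $ hg perf::revset --contexts 'draft() and ancestors(.)'
#
# evaluates the revset and also builds a changectx for every matched
# revision; add --clear to rebuild the volatile caches on every run.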
3774
3806
3775
3807
3776 @command(
3808 @command(
3777 b'perf::volatilesets|perfvolatilesets',
3809 b'perf::volatilesets|perfvolatilesets',
3778 [
3810 [
3779 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3811 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3780 ]
3812 ]
3781 + formatteropts,
3813 + formatteropts,
3782 )
3814 )
3783 def perfvolatilesets(ui, repo, *names, **opts):
3815 def perfvolatilesets(ui, repo, *names, **opts):
3784 """benchmark the computation of various volatile set
3816 """benchmark the computation of various volatile set
3785
3817
3786 Volatile sets compute elements related to filtering and obsolescence."""
3818 Volatile sets compute elements related to filtering and obsolescence."""
3787 opts = _byteskwargs(opts)
3819 opts = _byteskwargs(opts)
3788 timer, fm = gettimer(ui, opts)
3820 timer, fm = gettimer(ui, opts)
3789 repo = repo.unfiltered()
3821 repo = repo.unfiltered()
3790
3822
3791 def getobs(name):
3823 def getobs(name):
3792 def d():
3824 def d():
3793 repo.invalidatevolatilesets()
3825 repo.invalidatevolatilesets()
3794 if opts[b'clear_obsstore']:
3826 if opts[b'clear_obsstore']:
3795 clearfilecache(repo, b'obsstore')
3827 clearfilecache(repo, b'obsstore')
3796 obsolete.getrevs(repo, name)
3828 obsolete.getrevs(repo, name)
3797
3829
3798 return d
3830 return d
3799
3831
3800 allobs = sorted(obsolete.cachefuncs)
3832 allobs = sorted(obsolete.cachefuncs)
3801 if names:
3833 if names:
3802 allobs = [n for n in allobs if n in names]
3834 allobs = [n for n in allobs if n in names]
3803
3835
3804 for name in allobs:
3836 for name in allobs:
3805 timer(getobs(name), title=name)
3837 timer(getobs(name), title=name)
3806
3838
3807 def getfiltered(name):
3839 def getfiltered(name):
3808 def d():
3840 def d():
3809 repo.invalidatevolatilesets()
3841 repo.invalidatevolatilesets()
3810 if opts[b'clear_obsstore']:
3842 if opts[b'clear_obsstore']:
3811 clearfilecache(repo, b'obsstore')
3843 clearfilecache(repo, b'obsstore')
3812 repoview.filterrevs(repo, name)
3844 repoview.filterrevs(repo, name)
3813
3845
3814 return d
3846 return d
3815
3847
3816 allfilter = sorted(repoview.filtertable)
3848 allfilter = sorted(repoview.filtertable)
3817 if names:
3849 if names:
3818 allfilter = [n for n in allfilter if n in names]
3850 allfilter = [n for n in allfilter if n in names]
3819
3851
3820 for name in allfilter:
3852 for name in allfilter:
3821 timer(getfiltered(name), title=name)
3853 timer(getfiltered(name), title=name)
3822 fm.end()
3854 fm.end()
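# Illustrative invocation (the set names are assumptions; any key of
# obsolete.cachefuncs or repoview.filtertable may be passed):
#
#   $ hg perf::volatilesets --clear-obsstore obsolete visible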
3823
3855
3824
3856
3825 @command(
3857 @command(
3826 b'perf::branchmap|perfbranchmap',
3858 b'perf::branchmap|perfbranchmap',
3827 [
3859 [
3828 (b'f', b'full', False, b'Includes build time of subset'),
3860 (b'f', b'full', False, b'Includes build time of subset'),
3829 (
3861 (
3830 b'',
3862 b'',
3831 b'clear-revbranch',
3863 b'clear-revbranch',
3832 False,
3864 False,
3833 b'purge the revbranch cache between computation',
3865 b'purge the revbranch cache between computation',
3834 ),
3866 ),
3835 ]
3867 ]
3836 + formatteropts,
3868 + formatteropts,
3837 )
3869 )
3838 def perfbranchmap(ui, repo, *filternames, **opts):
3870 def perfbranchmap(ui, repo, *filternames, **opts):
3839 """benchmark the update of a branchmap
3871 """benchmark the update of a branchmap
3840
3872
3841 This benchmarks the full repo.branchmap() call with read and write disabled
3873 This benchmarks the full repo.branchmap() call with read and write disabled
3842 """
3874 """
3843 opts = _byteskwargs(opts)
3875 opts = _byteskwargs(opts)
3844 full = opts.get(b"full", False)
3876 full = opts.get(b"full", False)
3845 clear_revbranch = opts.get(b"clear_revbranch", False)
3877 clear_revbranch = opts.get(b"clear_revbranch", False)
3846 timer, fm = gettimer(ui, opts)
3878 timer, fm = gettimer(ui, opts)
3847
3879
3848 def getbranchmap(filtername):
3880 def getbranchmap(filtername):
3849 """generate a benchmark function for the filtername"""
3881 """generate a benchmark function for the filtername"""
3850 if filtername is None:
3882 if filtername is None:
3851 view = repo
3883 view = repo
3852 else:
3884 else:
3853 view = repo.filtered(filtername)
3885 view = repo.filtered(filtername)
3854 if util.safehasattr(view._branchcaches, '_per_filter'):
3886 if util.safehasattr(view._branchcaches, '_per_filter'):
3855 filtered = view._branchcaches._per_filter
3887 filtered = view._branchcaches._per_filter
3856 else:
3888 else:
3857 # older versions
3889 # older versions
3858 filtered = view._branchcaches
3890 filtered = view._branchcaches
3859
3891
3860 def d():
3892 def d():
3861 if clear_revbranch:
3893 if clear_revbranch:
3862 repo.revbranchcache()._clear()
3894 repo.revbranchcache()._clear()
3863 if full:
3895 if full:
3864 view._branchcaches.clear()
3896 view._branchcaches.clear()
3865 else:
3897 else:
3866 filtered.pop(filtername, None)
3898 filtered.pop(filtername, None)
3867 view.branchmap()
3899 view.branchmap()
3868
3900
3869 return d
3901 return d
3870
3902
3871 # add filter in smaller subset to bigger subset
3903 # add filter in smaller subset to bigger subset
3872 possiblefilters = set(repoview.filtertable)
3904 possiblefilters = set(repoview.filtertable)
3873 if filternames:
3905 if filternames:
3874 possiblefilters &= set(filternames)
3906 possiblefilters &= set(filternames)
3875 subsettable = getbranchmapsubsettable()
3907 subsettable = getbranchmapsubsettable()
3876 allfilters = []
3908 allfilters = []
3877 while possiblefilters:
3909 while possiblefilters:
3878 for name in possiblefilters:
3910 for name in possiblefilters:
3879 subset = subsettable.get(name)
3911 subset = subsettable.get(name)
3880 if subset not in possiblefilters:
3912 if subset not in possiblefilters:
3881 break
3913 break
3882 else:
3914 else:
3883 assert False, b'subset cycle %s!' % possiblefilters
3915 assert False, b'subset cycle %s!' % possiblefilters
3884 allfilters.append(name)
3916 allfilters.append(name)
3885 possiblefilters.remove(name)
3917 possiblefilters.remove(name)
3886
3918
3887 # warm the cache
3919 # warm the cache
3888 if not full:
3920 if not full:
3889 for name in allfilters:
3921 for name in allfilters:
3890 repo.filtered(name).branchmap()
3922 repo.filtered(name).branchmap()
3891 if not filternames or b'unfiltered' in filternames:
3923 if not filternames or b'unfiltered' in filternames:
3892 # add unfiltered
3924 # add unfiltered
3893 allfilters.append(None)
3925 allfilters.append(None)
3894
3926
3895 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3927 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3896 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3928 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3897 branchcacheread.set(classmethod(lambda *args: None))
3929 branchcacheread.set(classmethod(lambda *args: None))
3898 else:
3930 else:
3899 # older versions
3931 # older versions
3900 branchcacheread = safeattrsetter(branchmap, b'read')
3932 branchcacheread = safeattrsetter(branchmap, b'read')
3901 branchcacheread.set(lambda *args: None)
3933 branchcacheread.set(lambda *args: None)
3902 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3934 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3903 branchcachewrite.set(lambda *args: None)
3935 branchcachewrite.set(lambda *args: None)
3904 try:
3936 try:
3905 for name in allfilters:
3937 for name in allfilters:
3906 printname = name
3938 printname = name
3907 if name is None:
3939 if name is None:
3908 printname = b'unfiltered'
3940 printname = b'unfiltered'
3909 timer(getbranchmap(name), title=printname)
3941 timer(getbranchmap(name), title=printname)
3910 finally:
3942 finally:
3911 branchcacheread.restore()
3943 branchcacheread.restore()
3912 branchcachewrite.restore()
3944 branchcachewrite.restore()
3913 fm.end()
3945 fm.end()
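# Illustrative invocation (the filter names are assumptions):
#
#   $ hg perf::branchmap --full visible served
#
# times a from-scratch branchmap computation for the 'visible' and 'served'
# views; with cache reads and writes stubbed out above, only the in-memory
# update is measured.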
3914
3946
3915
3947
3916 @command(
3948 @command(
3917 b'perf::branchmapupdate|perfbranchmapupdate',
3949 b'perf::branchmapupdate|perfbranchmapupdate',
3918 [
3950 [
3919 (b'', b'base', [], b'subset of revision to start from'),
3951 (b'', b'base', [], b'subset of revision to start from'),
3920 (b'', b'target', [], b'subset of revision to end with'),
3952 (b'', b'target', [], b'subset of revision to end with'),
3921 (b'', b'clear-caches', False, b'clear cache between each runs'),
3953 (b'', b'clear-caches', False, b'clear cache between each runs'),
3922 ]
3954 ]
3923 + formatteropts,
3955 + formatteropts,
3924 )
3956 )
3925 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3957 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3926 """benchmark branchmap update from for <base> revs to <target> revs
3958 """benchmark branchmap update from for <base> revs to <target> revs
3927
3959
3928 If `--clear-caches` is passed, the following items will be reset before
3960 If `--clear-caches` is passed, the following items will be reset before
3929 each update:
3961 each update:
3930 * the changelog instance and associated indexes
3962 * the changelog instance and associated indexes
3931 * the rev-branch-cache instance
3963 * the rev-branch-cache instance
3932
3964
3933 Examples:
3965 Examples:
3934
3966
3935 # update for the one last revision
3967 # update for the one last revision
3936 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3968 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3937
3969
3938 # update for a change coming with a new branch
3970 # update for a change coming with a new branch
3939 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3971 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3940 """
3972 """
3941 from mercurial import branchmap
3973 from mercurial import branchmap
3942 from mercurial import repoview
3974 from mercurial import repoview
3943
3975
3944 opts = _byteskwargs(opts)
3976 opts = _byteskwargs(opts)
3945 timer, fm = gettimer(ui, opts)
3977 timer, fm = gettimer(ui, opts)
3946 clearcaches = opts[b'clear_caches']
3978 clearcaches = opts[b'clear_caches']
3947 unfi = repo.unfiltered()
3979 unfi = repo.unfiltered()
3948 x = [None] # used to pass data between closures
3980 x = [None] # used to pass data between closures
3949
3981
3950 # we use a `list` here to avoid possible side effect from smartset
3982 # we use a `list` here to avoid possible side effect from smartset
3951 baserevs = list(scmutil.revrange(repo, base))
3983 baserevs = list(scmutil.revrange(repo, base))
3952 targetrevs = list(scmutil.revrange(repo, target))
3984 targetrevs = list(scmutil.revrange(repo, target))
3953 if not baserevs:
3985 if not baserevs:
3954 raise error.Abort(b'no revisions selected for --base')
3986 raise error.Abort(b'no revisions selected for --base')
3955 if not targetrevs:
3987 if not targetrevs:
3956 raise error.Abort(b'no revisions selected for --target')
3988 raise error.Abort(b'no revisions selected for --target')
3957
3989
3958 # make sure the target branchmap also contains the one in the base
3990 # make sure the target branchmap also contains the one in the base
3959 targetrevs = list(set(baserevs) | set(targetrevs))
3991 targetrevs = list(set(baserevs) | set(targetrevs))
3960 targetrevs.sort()
3992 targetrevs.sort()
3961
3993
3962 cl = repo.changelog
3994 cl = repo.changelog
3963 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3995 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3964 allbaserevs.sort()
3996 allbaserevs.sort()
3965 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3997 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3966
3998
3967 newrevs = list(alltargetrevs.difference(allbaserevs))
3999 newrevs = list(alltargetrevs.difference(allbaserevs))
3968 newrevs.sort()
4000 newrevs.sort()
3969
4001
3970 allrevs = frozenset(unfi.changelog.revs())
4002 allrevs = frozenset(unfi.changelog.revs())
3971 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4003 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3972 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4004 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3973
4005
3974 def basefilter(repo, visibilityexceptions=None):
4006 def basefilter(repo, visibilityexceptions=None):
3975 return basefilterrevs
4007 return basefilterrevs
3976
4008
3977 def targetfilter(repo, visibilityexceptions=None):
4009 def targetfilter(repo, visibilityexceptions=None):
3978 return targetfilterrevs
4010 return targetfilterrevs
3979
4011
3980 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4012 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3981 ui.status(msg % (len(allbaserevs), len(newrevs)))
4013 ui.status(msg % (len(allbaserevs), len(newrevs)))
3982 if targetfilterrevs:
4014 if targetfilterrevs:
3983 msg = b'(%d revisions still filtered)\n'
4015 msg = b'(%d revisions still filtered)\n'
3984 ui.status(msg % len(targetfilterrevs))
4016 ui.status(msg % len(targetfilterrevs))
3985
4017
3986 try:
4018 try:
3987 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4019 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3988 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4020 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3989
4021
3990 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4022 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3991 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4023 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3992
4024
3993 # try to find an existing branchmap to reuse
4025 # try to find an existing branchmap to reuse
3994 subsettable = getbranchmapsubsettable()
4026 subsettable = getbranchmapsubsettable()
3995 candidatefilter = subsettable.get(None)
4027 candidatefilter = subsettable.get(None)
3996 while candidatefilter is not None:
4028 while candidatefilter is not None:
3997 candidatebm = repo.filtered(candidatefilter).branchmap()
4029 candidatebm = repo.filtered(candidatefilter).branchmap()
3998 if candidatebm.validfor(baserepo):
4030 if candidatebm.validfor(baserepo):
3999 filtered = repoview.filterrevs(repo, candidatefilter)
4031 filtered = repoview.filterrevs(repo, candidatefilter)
4000 missing = [r for r in allbaserevs if r in filtered]
4032 missing = [r for r in allbaserevs if r in filtered]
4001 base = candidatebm.copy()
4033 base = candidatebm.copy()
4002 base.update(baserepo, missing)
4034 base.update(baserepo, missing)
4003 break
4035 break
4004 candidatefilter = subsettable.get(candidatefilter)
4036 candidatefilter = subsettable.get(candidatefilter)
4005 else:
4037 else:
4006 # no suitable subset was found
4038 # no suitable subset was found
4007 base = branchmap.branchcache()
4039 base = branchmap.branchcache()
4008 base.update(baserepo, allbaserevs)
4040 base.update(baserepo, allbaserevs)
4009
4041
4010 def setup():
4042 def setup():
4011 x[0] = base.copy()
4043 x[0] = base.copy()
4012 if clearcaches:
4044 if clearcaches:
4013 unfi._revbranchcache = None
4045 unfi._revbranchcache = None
4014 clearchangelog(repo)
4046 clearchangelog(repo)
4015
4047
4016 def bench():
4048 def bench():
4017 x[0].update(targetrepo, newrevs)
4049 x[0].update(targetrepo, newrevs)
4018
4050
4019 timer(bench, setup=setup)
4051 timer(bench, setup=setup)
4020 fm.end()
4052 fm.end()
4021 finally:
4053 finally:
4022 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4054 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4023 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4055 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4024
4056
4025
4057
4026 @command(
4058 @command(
4027 b'perf::branchmapload|perfbranchmapload',
4059 b'perf::branchmapload|perfbranchmapload',
4028 [
4060 [
4029 (b'f', b'filter', b'', b'Specify repoview filter'),
4061 (b'f', b'filter', b'', b'Specify repoview filter'),
4030 (b'', b'list', False, b'List branchmap filter caches'),
4062 (b'', b'list', False, b'List branchmap filter caches'),
4031 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4063 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4032 ]
4064 ]
4033 + formatteropts,
4065 + formatteropts,
4034 )
4066 )
4035 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4067 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4036 """benchmark reading the branchmap"""
4068 """benchmark reading the branchmap"""
4037 opts = _byteskwargs(opts)
4069 opts = _byteskwargs(opts)
4038 clearrevlogs = opts[b'clear_revlogs']
4070 clearrevlogs = opts[b'clear_revlogs']
4039
4071
4040 if list:
4072 if list:
4041 for name, kind, st in repo.cachevfs.readdir(stat=True):
4073 for name, kind, st in repo.cachevfs.readdir(stat=True):
4042 if name.startswith(b'branch2'):
4074 if name.startswith(b'branch2'):
4043 filtername = name.partition(b'-')[2] or b'unfiltered'
4075 filtername = name.partition(b'-')[2] or b'unfiltered'
4044 ui.status(
4076 ui.status(
4045 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4077 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4046 )
4078 )
4047 return
4079 return
4048 if not filter:
4080 if not filter:
4049 filter = None
4081 filter = None
4050 subsettable = getbranchmapsubsettable()
4082 subsettable = getbranchmapsubsettable()
4051 if filter is None:
4083 if filter is None:
4052 repo = repo.unfiltered()
4084 repo = repo.unfiltered()
4053 else:
4085 else:
4054 repo = repoview.repoview(repo, filter)
4086 repo = repoview.repoview(repo, filter)
4055
4087
4056 repo.branchmap() # make sure we have a relevant, up to date branchmap
4088 repo.branchmap() # make sure we have a relevant, up to date branchmap
4057
4089
4058 try:
4090 try:
4059 fromfile = branchmap.branchcache.fromfile
4091 fromfile = branchmap.branchcache.fromfile
4060 except AttributeError:
4092 except AttributeError:
4061 # older versions
4093 # older versions
4062 fromfile = branchmap.read
4094 fromfile = branchmap.read
4063
4095
4064 currentfilter = filter
4096 currentfilter = filter
4065 # try once without the timer; the filter may not be cached
4097 # try once without the timer; the filter may not be cached
4066 while fromfile(repo) is None:
4098 while fromfile(repo) is None:
4067 currentfilter = subsettable.get(currentfilter)
4099 currentfilter = subsettable.get(currentfilter)
4068 if currentfilter is None:
4100 if currentfilter is None:
4069 raise error.Abort(
4101 raise error.Abort(
4070 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4102 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4071 )
4103 )
4072 repo = repo.filtered(currentfilter)
4104 repo = repo.filtered(currentfilter)
4073 timer, fm = gettimer(ui, opts)
4105 timer, fm = gettimer(ui, opts)
4074
4106
4075 def setup():
4107 def setup():
4076 if clearrevlogs:
4108 if clearrevlogs:
4077 clearchangelog(repo)
4109 clearchangelog(repo)
4078
4110
4079 def bench():
4111 def bench():
4080 fromfile(repo)
4112 fromfile(repo)
4081
4113
4082 timer(bench, setup=setup)
4114 timer(bench, setup=setup)
4083 fm.end()
4115 fm.end()
4084
4116
4085
4117
4086 @command(b'perf::loadmarkers|perfloadmarkers')
4118 @command(b'perf::loadmarkers|perfloadmarkers')
4087 def perfloadmarkers(ui, repo):
4119 def perfloadmarkers(ui, repo):
4088 """benchmark the time to parse the on-disk markers for a repo
4120 """benchmark the time to parse the on-disk markers for a repo
4089
4121
4090 Result is the number of markers in the repo."""
4122 Result is the number of markers in the repo."""
4091 timer, fm = gettimer(ui)
4123 timer, fm = gettimer(ui)
4092 svfs = getsvfs(repo)
4124 svfs = getsvfs(repo)
4093 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4125 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4094 fm.end()
4126 fm.end()
4095
4127
4096
4128
4097 @command(
4129 @command(
4098 b'perf::lrucachedict|perflrucachedict',
4130 b'perf::lrucachedict|perflrucachedict',
4099 formatteropts
4131 formatteropts
4100 + [
4132 + [
4101 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4133 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4102 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4134 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4103 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4135 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4104 (b'', b'size', 4, b'size of cache'),
4136 (b'', b'size', 4, b'size of cache'),
4105 (b'', b'gets', 10000, b'number of key lookups'),
4137 (b'', b'gets', 10000, b'number of key lookups'),
4106 (b'', b'sets', 10000, b'number of key sets'),
4138 (b'', b'sets', 10000, b'number of key sets'),
4107 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4139 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4108 (
4140 (
4109 b'',
4141 b'',
4110 b'mixedgetfreq',
4142 b'mixedgetfreq',
4111 50,
4143 50,
4112 b'frequency of get vs set ops in mixed mode',
4144 b'frequency of get vs set ops in mixed mode',
4113 ),
4145 ),
4114 ],
4146 ],
4115 norepo=True,
4147 norepo=True,
4116 )
4148 )
4117 def perflrucache(
4149 def perflrucache(
4118 ui,
4150 ui,
4119 mincost=0,
4151 mincost=0,
4120 maxcost=100,
4152 maxcost=100,
4121 costlimit=0,
4153 costlimit=0,
4122 size=4,
4154 size=4,
4123 gets=10000,
4155 gets=10000,
4124 sets=10000,
4156 sets=10000,
4125 mixed=10000,
4157 mixed=10000,
4126 mixedgetfreq=50,
4158 mixedgetfreq=50,
4127 **opts
4159 **opts
4128 ):
4160 ):
4129 opts = _byteskwargs(opts)
4161 opts = _byteskwargs(opts)
4130
4162
4131 def doinit():
4163 def doinit():
4132 for i in _xrange(10000):
4164 for i in _xrange(10000):
4133 util.lrucachedict(size)
4165 util.lrucachedict(size)
4134
4166
4135 costrange = list(range(mincost, maxcost + 1))
4167 costrange = list(range(mincost, maxcost + 1))
4136
4168
4137 values = []
4169 values = []
4138 for i in _xrange(size):
4170 for i in _xrange(size):
4139 values.append(random.randint(0, _maxint))
4171 values.append(random.randint(0, _maxint))
4140
4172
4141 # Get mode fills the cache and tests raw lookup performance with no
4173 # Get mode fills the cache and tests raw lookup performance with no
4142 # eviction.
4174 # eviction.
4143 getseq = []
4175 getseq = []
4144 for i in _xrange(gets):
4176 for i in _xrange(gets):
4145 getseq.append(random.choice(values))
4177 getseq.append(random.choice(values))
4146
4178
4147 def dogets():
4179 def dogets():
4148 d = util.lrucachedict(size)
4180 d = util.lrucachedict(size)
4149 for v in values:
4181 for v in values:
4150 d[v] = v
4182 d[v] = v
4151 for key in getseq:
4183 for key in getseq:
4152 value = d[key]
4184 value = d[key]
4153 value # silence pyflakes warning
4185 value # silence pyflakes warning
4154
4186
4155 def dogetscost():
4187 def dogetscost():
4156 d = util.lrucachedict(size, maxcost=costlimit)
4188 d = util.lrucachedict(size, maxcost=costlimit)
4157 for i, v in enumerate(values):
4189 for i, v in enumerate(values):
4158 d.insert(v, v, cost=costs[i])
4190 d.insert(v, v, cost=costs[i])
4159 for key in getseq:
4191 for key in getseq:
4160 try:
4192 try:
4161 value = d[key]
4193 value = d[key]
4162 value # silence pyflakes warning
4194 value # silence pyflakes warning
4163 except KeyError:
4195 except KeyError:
4164 pass
4196 pass
4165
4197
4166 # Set mode tests insertion speed with cache eviction.
4198 # Set mode tests insertion speed with cache eviction.
4167 setseq = []
4199 setseq = []
4168 costs = []
4200 costs = []
4169 for i in _xrange(sets):
4201 for i in _xrange(sets):
4170 setseq.append(random.randint(0, _maxint))
4202 setseq.append(random.randint(0, _maxint))
4171 costs.append(random.choice(costrange))
4203 costs.append(random.choice(costrange))
4172
4204
4173 def doinserts():
4205 def doinserts():
4174 d = util.lrucachedict(size)
4206 d = util.lrucachedict(size)
4175 for v in setseq:
4207 for v in setseq:
4176 d.insert(v, v)
4208 d.insert(v, v)
4177
4209
4178 def doinsertscost():
4210 def doinsertscost():
4179 d = util.lrucachedict(size, maxcost=costlimit)
4211 d = util.lrucachedict(size, maxcost=costlimit)
4180 for i, v in enumerate(setseq):
4212 for i, v in enumerate(setseq):
4181 d.insert(v, v, cost=costs[i])
4213 d.insert(v, v, cost=costs[i])
4182
4214
4183 def dosets():
4215 def dosets():
4184 d = util.lrucachedict(size)
4216 d = util.lrucachedict(size)
4185 for v in setseq:
4217 for v in setseq:
4186 d[v] = v
4218 d[v] = v
4187
4219
4188 # Mixed mode randomly performs gets and sets with eviction.
4220 # Mixed mode randomly performs gets and sets with eviction.
4189 mixedops = []
4221 mixedops = []
4190 for i in _xrange(mixed):
4222 for i in _xrange(mixed):
4191 r = random.randint(0, 100)
4223 r = random.randint(0, 100)
4192 if r < mixedgetfreq:
4224 if r < mixedgetfreq:
4193 op = 0
4225 op = 0
4194 else:
4226 else:
4195 op = 1
4227 op = 1
4196
4228
4197 mixedops.append(
4229 mixedops.append(
4198 (op, random.randint(0, size * 2), random.choice(costrange))
4230 (op, random.randint(0, size * 2), random.choice(costrange))
4199 )
4231 )
4200
4232
4201 def domixed():
4233 def domixed():
4202 d = util.lrucachedict(size)
4234 d = util.lrucachedict(size)
4203
4235
4204 for op, v, cost in mixedops:
4236 for op, v, cost in mixedops:
4205 if op == 0:
4237 if op == 0:
4206 try:
4238 try:
4207 d[v]
4239 d[v]
4208 except KeyError:
4240 except KeyError:
4209 pass
4241 pass
4210 else:
4242 else:
4211 d[v] = v
4243 d[v] = v
4212
4244
4213 def domixedcost():
4245 def domixedcost():
4214 d = util.lrucachedict(size, maxcost=costlimit)
4246 d = util.lrucachedict(size, maxcost=costlimit)
4215
4247
4216 for op, v, cost in mixedops:
4248 for op, v, cost in mixedops:
4217 if op == 0:
4249 if op == 0:
4218 try:
4250 try:
4219 d[v]
4251 d[v]
4220 except KeyError:
4252 except KeyError:
4221 pass
4253 pass
4222 else:
4254 else:
4223 d.insert(v, v, cost=cost)
4255 d.insert(v, v, cost=cost)
4224
4256
4225 benches = [
4257 benches = [
4226 (doinit, b'init'),
4258 (doinit, b'init'),
4227 ]
4259 ]
4228
4260
4229 if costlimit:
4261 if costlimit:
4230 benches.extend(
4262 benches.extend(
4231 [
4263 [
4232 (dogetscost, b'gets w/ cost limit'),
4264 (dogetscost, b'gets w/ cost limit'),
4233 (doinsertscost, b'inserts w/ cost limit'),
4265 (doinsertscost, b'inserts w/ cost limit'),
4234 (domixedcost, b'mixed w/ cost limit'),
4266 (domixedcost, b'mixed w/ cost limit'),
4235 ]
4267 ]
4236 )
4268 )
4237 else:
4269 else:
4238 benches.extend(
4270 benches.extend(
4239 [
4271 [
4240 (dogets, b'gets'),
4272 (dogets, b'gets'),
4241 (doinserts, b'inserts'),
4273 (doinserts, b'inserts'),
4242 (dosets, b'sets'),
4274 (dosets, b'sets'),
4243 (domixed, b'mixed'),
4275 (domixed, b'mixed'),
4244 ]
4276 ]
4245 )
4277 )
4246
4278
4247 for fn, title in benches:
4279 for fn, title in benches:
4248 timer, fm = gettimer(ui, opts)
4280 timer, fm = gettimer(ui, opts)
4249 timer(fn, title=title)
4281 timer(fn, title=title)
4250 fm.end()
4282 fm.end()
4251
4283
4252
4284
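The gets/inserts/mixed workloads above only exercise a small API surface: key lookup, key insertion, and size-bounded eviction of the least recently used entry. A rough stand-in built on collections.OrderedDict, shown purely to illustrate the access pattern being timed; it is not util.lrucachedict and it ignores the cost-limit variants:

import collections

class TinyLRU:
    """Size-bounded mapping that evicts the least recently used key (sketch)."""

    def __init__(self, size):
        self._size = size
        self._data = collections.OrderedDict()

    def __getitem__(self, key):
        value = self._data[key]       # raises KeyError on a miss
        self._data.move_to_end(key)   # mark as most recently used
        return value

    def __setitem__(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)
        if len(self._data) > self._size:
            self._data.popitem(last=False)   # evict the oldest entry

cache = TinyLRU(2)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3                        # 'a' is evicted
assert 'a' not in cache._data and cache['c'] == 3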
4253 @command(
4285 @command(
4254 b'perf::write|perfwrite',
4286 b'perf::write|perfwrite',
4255 formatteropts
4287 formatteropts
4256 + [
4288 + [
4257 (b'', b'write-method', b'write', b'ui write method'),
4289 (b'', b'write-method', b'write', b'ui write method'),
4258 (b'', b'nlines', 100, b'number of lines'),
4290 (b'', b'nlines', 100, b'number of lines'),
4259 (b'', b'nitems', 100, b'number of items (per line)'),
4291 (b'', b'nitems', 100, b'number of items (per line)'),
4260 (b'', b'item', b'x', b'item that is written'),
4292 (b'', b'item', b'x', b'item that is written'),
4261 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4293 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4262 (b'', b'flush-line', None, b'flush after each line'),
4294 (b'', b'flush-line', None, b'flush after each line'),
4263 ],
4295 ],
4264 )
4296 )
4265 def perfwrite(ui, repo, **opts):
4297 def perfwrite(ui, repo, **opts):
4266 """microbenchmark ui.write (and others)"""
4298 """microbenchmark ui.write (and others)"""
4267 opts = _byteskwargs(opts)
4299 opts = _byteskwargs(opts)
4268
4300
4269 write = getattr(ui, _sysstr(opts[b'write_method']))
4301 write = getattr(ui, _sysstr(opts[b'write_method']))
4270 nlines = int(opts[b'nlines'])
4302 nlines = int(opts[b'nlines'])
4271 nitems = int(opts[b'nitems'])
4303 nitems = int(opts[b'nitems'])
4272 item = opts[b'item']
4304 item = opts[b'item']
4273 batch_line = opts.get(b'batch_line')
4305 batch_line = opts.get(b'batch_line')
4274 flush_line = opts.get(b'flush_line')
4306 flush_line = opts.get(b'flush_line')
4275
4307
4276 if batch_line:
4308 if batch_line:
4277 line = item * nitems + b'\n'
4309 line = item * nitems + b'\n'
4278
4310
4279 def benchmark():
4311 def benchmark():
4280 for i in pycompat.xrange(nlines):
4312 for i in pycompat.xrange(nlines):
4281 if batch_line:
4313 if batch_line:
4282 write(line)
4314 write(line)
4283 else:
4315 else:
4284 for i in pycompat.xrange(nitems):
4316 for i in pycompat.xrange(nitems):
4285 write(item)
4317 write(item)
4286 write(b'\n')
4318 write(b'\n')
4287 if flush_line:
4319 if flush_line:
4288 ui.flush()
4320 ui.flush()
4289 ui.flush()
4321 ui.flush()
4290
4322
4291 timer, fm = gettimer(ui, opts)
4323 timer, fm = gettimer(ui, opts)
4292 timer(benchmark)
4324 timer(benchmark)
4293 fm.end()
4325 fm.end()
4294
4326
4295
4327
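The --batch-line flag above toggles between handing the write method one pre-built line per iteration and calling it once per item. A small sketch of the two shapes using io.StringIO as a stand-in for the ui write method (names here are illustrative only):

import io

def write_batched(out, item, nitems):
    # build the whole line first, then hand it to the write method once
    out.write(item * nitems + '\n')

def write_per_item(out, item, nitems):
    # one write call per item, plus one more for the newline
    for _ in range(nitems):
        out.write(item)
    out.write('\n')

buf_a, buf_b = io.StringIO(), io.StringIO()
write_batched(buf_a, 'x', 5)
write_per_item(buf_b, 'x', 5)
assert buf_a.getvalue() == buf_b.getvalue() == 'xxxxx\n'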
4296 def uisetup(ui):
4328 def uisetup(ui):
4297 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4329 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4298 commands, b'debugrevlogopts'
4330 commands, b'debugrevlogopts'
4299 ):
4331 ):
4300 # for "historical portability":
4332 # for "historical portability":
4301 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4333 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4302 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4334 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4303 # openrevlog() should cause failure, because it has been
4335 # openrevlog() should cause failure, because it has been
4304 # available since 3.5 (or 49c583ca48c4).
4336 # available since 3.5 (or 49c583ca48c4).
4305 def openrevlog(orig, repo, cmd, file_, opts):
4337 def openrevlog(orig, repo, cmd, file_, opts):
4306 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4338 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4307 raise error.Abort(
4339 raise error.Abort(
4308 b"This version doesn't support --dir option",
4340 b"This version doesn't support --dir option",
4309 hint=b"use 3.5 or later",
4341 hint=b"use 3.5 or later",
4310 )
4342 )
4311 return orig(repo, cmd, file_, opts)
4343 return orig(repo, cmd, file_, opts)
4312
4344
4313 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4345 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4314
4346
4315
4347
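The openrevlog wrapper registered above follows the usual convention of passing the original callable as the wrapper's first argument. A library-free sketch of that pattern with hypothetical names, not Mercurial's extensions.wrapfunction implementation:

import functools
import types

def wrapfunction_sketch(container, name, wrapper):
    # replace container.<name> so the wrapper receives the original first
    orig = getattr(container, name)
    setattr(container, name, functools.partial(wrapper, orig))
    return orig

fake_cmdutil = types.SimpleNamespace(openrevlog=lambda path: 'opened %s' % path)

def guarded_openrevlog(orig, path):
    # reject an unsupported argument, otherwise delegate to the original
    if path.endswith('.tmp'):
        raise ValueError('unsupported file')
    return orig(path)

wrapfunction_sketch(fake_cmdutil, 'openrevlog', guarded_openrevlog)
assert fake_cmdutil.openrevlog('data/a.i') == 'opened data/a.i'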
4316 @command(
4348 @command(
4317 b'perf::progress|perfprogress',
4349 b'perf::progress|perfprogress',
4318 formatteropts
4350 formatteropts
4319 + [
4351 + [
4320 (b'', b'topic', b'topic', b'topic for progress messages'),
4352 (b'', b'topic', b'topic', b'topic for progress messages'),
4321 (b'c', b'total', 1000000, b'total value we are progressing to'),
4353 (b'c', b'total', 1000000, b'total value we are progressing to'),
4322 ],
4354 ],
4323 norepo=True,
4355 norepo=True,
4324 )
4356 )
4325 def perfprogress(ui, topic=None, total=None, **opts):
4357 def perfprogress(ui, topic=None, total=None, **opts):
4326 """printing of progress bars"""
4358 """printing of progress bars"""
4327 opts = _byteskwargs(opts)
4359 opts = _byteskwargs(opts)
4328
4360
4329 timer, fm = gettimer(ui, opts)
4361 timer, fm = gettimer(ui, opts)
4330
4362
4331 def doprogress():
4363 def doprogress():
4332 with ui.makeprogress(topic, total=total) as progress:
4364 with ui.makeprogress(topic, total=total) as progress:
4333 for i in _xrange(total):
4365 for i in _xrange(total):
4334 progress.increment()
4366 progress.increment()
4335
4367
4336 timer(doprogress)
4368 timer(doprogress)
4337 fm.end()
4369 fm.end()
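doprogress() above only relies on the progress object being a context manager with an increment() method. A minimal stand-in for that protocol, purely illustrative and not ui.makeprogress:

class CountingProgress:
    """Sketch of the small protocol the progress benchmark exercises."""

    def __init__(self, topic, total=None):
        self.topic, self.total, self.pos = topic, total, 0

    def increment(self, step=1):
        self.pos += step

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        return False   # do not swallow exceptions

with CountingProgress(b'topic', total=3) as progress:
    for _ in range(3):
        progress.increment()
assert progress.pos == 3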
@@ -1,437 +1,439 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perf::addremove
81 perf::addremove
82 (no help text available)
82 (no help text available)
83 perf::ancestors
83 perf::ancestors
84 (no help text available)
84 (no help text available)
85 perf::ancestorset
85 perf::ancestorset
86 (no help text available)
86 (no help text available)
87 perf::annotate
87 perf::annotate
88 (no help text available)
88 (no help text available)
89 perf::bdiff benchmark a bdiff between revisions
89 perf::bdiff benchmark a bdiff between revisions
90 perf::bookmarks
90 perf::bookmarks
91 benchmark parsing bookmarks from disk to memory
91 benchmark parsing bookmarks from disk to memory
92 perf::branchmap
92 perf::branchmap
93 benchmark the update of a branchmap
93 benchmark the update of a branchmap
94 perf::branchmapload
94 perf::branchmapload
95 benchmark reading the branchmap
95 benchmark reading the branchmap
96 perf::branchmapupdate
96 perf::branchmapupdate
97 benchmark branchmap update from <base> revs to <target>
97 benchmark branchmap update from <base> revs to <target>
98 revs
98 revs
99 perf::bundle benchmark the creation of a bundle from a repository
99 perf::bundle benchmark the creation of a bundle from a repository
100 perf::bundleread
100 perf::bundleread
101 Benchmark reading of bundle files.
101 Benchmark reading of bundle files.
102 perf::cca (no help text available)
102 perf::cca (no help text available)
103 perf::changegroupchangelog
103 perf::changegroupchangelog
104 Benchmark producing a changelog group for a changegroup.
104 Benchmark producing a changelog group for a changegroup.
105 perf::changeset
105 perf::changeset
106 (no help text available)
106 (no help text available)
107 perf::ctxfiles
107 perf::ctxfiles
108 (no help text available)
108 (no help text available)
109 perf::delta-find
109 perf::delta-find
110 benchmark the process of finding a valid delta for a revlog
110 benchmark the process of finding a valid delta for a revlog
111 revision
111 revision
112 perf::diffwd Profile diff of working directory changes
112 perf::diffwd Profile diff of working directory changes
113 perf::dirfoldmap
113 perf::dirfoldmap
114 benchmark a 'dirstate._map.dirfoldmap.get()' request
114 benchmark a 'dirstate._map.dirfoldmap.get()' request
115 perf::dirs (no help text available)
115 perf::dirs (no help text available)
116 perf::dirstate
116 perf::dirstate
117 benchmark the time of various dirstate operations
117 benchmark the time of various dirstate operations
118 perf::dirstatedirs
118 perf::dirstatedirs
119 benchmark a 'dirstate.hasdir' call from an empty 'dirs' cache
119 benchmark a 'dirstate.hasdir' call from an empty 'dirs' cache
120 perf::dirstatefoldmap
120 perf::dirstatefoldmap
121 benchmark a 'dirstate._map.filefoldmap.get()' request
121 benchmark a 'dirstate._map.filefoldmap.get()' request
122 perf::dirstatewrite
122 perf::dirstatewrite
123 benchmark the time it takes to write a dirstate on disk
123 benchmark the time it takes to write a dirstate on disk
124 perf::discovery
124 perf::discovery
125 benchmark discovery between local repo and the peer at given
125 benchmark discovery between local repo and the peer at given
126 path
126 path
127 perf::fncacheencode
127 perf::fncacheencode
128 (no help text available)
128 (no help text available)
129 perf::fncacheload
129 perf::fncacheload
130 (no help text available)
130 (no help text available)
131 perf::fncachewrite
131 perf::fncachewrite
132 (no help text available)
132 (no help text available)
133 perf::heads benchmark the computation of changelog heads
133 perf::heads benchmark the computation of changelog heads
134 perf::helper-mergecopies
134 perf::helper-mergecopies
135 find statistics about potential parameters for
135 find statistics about potential parameters for
136 'perfmergecopies'
136 'perfmergecopies'
137 perf::helper-pathcopies
137 perf::helper-pathcopies
138 find statistics about potential parameters for the
138 find statistics about potential parameters for the
139 'perftracecopies'
139 'perftracecopies'
140 perf::ignore benchmark operations related to computing ignore
140 perf::ignore benchmark operations related to computing ignore
141 perf::index benchmark index creation time followed by a lookup
141 perf::index benchmark index creation time followed by a lookup
142 perf::linelogedits
142 perf::linelogedits
143 (no help text available)
143 (no help text available)
144 perf::loadmarkers
144 perf::loadmarkers
145 benchmark the time to parse the on-disk markers for a repo
145 benchmark the time to parse the on-disk markers for a repo
146 perf::log (no help text available)
146 perf::log (no help text available)
147 perf::lookup (no help text available)
147 perf::lookup (no help text available)
148 perf::lrucachedict
148 perf::lrucachedict
149 (no help text available)
149 (no help text available)
150 perf::manifest
150 perf::manifest
151 benchmark the time to read a manifest from disk and return a
151 benchmark the time to read a manifest from disk and return a
152 usable
152 usable
153 perf::mergecalculate
153 perf::mergecalculate
154 (no help text available)
154 (no help text available)
155 perf::mergecopies
155 perf::mergecopies
156 measure runtime of 'copies.mergecopies'
156 measure runtime of 'copies.mergecopies'
157 perf::moonwalk
157 perf::moonwalk
158 benchmark walking the changelog backwards
158 benchmark walking the changelog backwards
159 perf::nodelookup
159 perf::nodelookup
160 (no help text available)
160 (no help text available)
161 perf::nodemap
161 perf::nodemap
162 benchmark the time necessary to look up revision from a cold
162 benchmark the time necessary to look up revision from a cold
163 nodemap
163 nodemap
164 perf::parents
164 perf::parents
165 benchmark the time necessary to fetch one changeset's parents.
165 benchmark the time necessary to fetch one changeset's parents.
166 perf::pathcopies
166 perf::pathcopies
167 benchmark the copy tracing logic
167 benchmark the copy tracing logic
168 perf::phases benchmark phasesets computation
168 perf::phases benchmark phasesets computation
169 perf::phasesremote
169 perf::phasesremote
170 benchmark time needed to analyse phases of the remote server
170 benchmark time needed to analyse phases of the remote server
171 perf::progress
171 perf::progress
172 printing of progress bars
172 printing of progress bars
173 perf::rawfiles
173 perf::rawfiles
174 (no help text available)
174 (no help text available)
175 perf::revlogchunks
175 perf::revlogchunks
176 Benchmark operations on revlog chunks.
176 Benchmark operations on revlog chunks.
177 perf::revlogindex
177 perf::revlogindex
178 Benchmark operations against a revlog index.
178 Benchmark operations against a revlog index.
179 perf::revlogrevision
179 perf::revlogrevision
180 Benchmark obtaining a revlog revision.
180 Benchmark obtaining a revlog revision.
181 perf::revlogrevisions
181 perf::revlogrevisions
182 Benchmark reading a series of revisions from a revlog.
182 Benchmark reading a series of revisions from a revlog.
183 perf::revlogwrite
183 perf::revlogwrite
184 Benchmark writing a series of revisions to a revlog.
184 Benchmark writing a series of revisions to a revlog.
185 perf::revrange
185 perf::revrange
186 (no help text available)
186 (no help text available)
187 perf::revset benchmark the execution time of a revset
187 perf::revset benchmark the execution time of a revset
188 perf::startup
188 perf::startup
189 (no help text available)
189 (no help text available)
190 perf::status benchmark the performance of a single status call
190 perf::status benchmark the performance of a single status call
191 perf::stream-generate
192 benchmark the full generation of a stream clone
191 perf::stream-locked-section
193 perf::stream-locked-section
192 benchmark the initial, repo-locked, section of a stream-clone
194 benchmark the initial, repo-locked, section of a stream-clone
193 perf::tags (no help text available)
195 perf::tags (no help text available)
194 perf::templating
196 perf::templating
195 test the rendering time of a given template
197 test the rendering time of a given template
196 perf::unbundle
198 perf::unbundle
197 benchmark application of a bundle in a repository.
199 benchmark application of a bundle in a repository.
198 perf::unidiff
200 perf::unidiff
199 benchmark a unified diff between revisions
201 benchmark a unified diff between revisions
200 perf::volatilesets
202 perf::volatilesets
201 benchmark the computation of various volatile sets
203 benchmark the computation of various volatile sets
202 perf::walk (no help text available)
204 perf::walk (no help text available)
203 perf::write microbenchmark ui.write (and others)
205 perf::write microbenchmark ui.write (and others)
204
206
205 (use 'hg help -v perf' to show built-in aliases and global options)
207 (use 'hg help -v perf' to show built-in aliases and global options)
206
208
207 $ hg help perfaddremove
209 $ hg help perfaddremove
208 hg perf::addremove
210 hg perf::addremove
209
211
210 aliases: perfaddremove
212 aliases: perfaddremove
211
213
212 (no help text available)
214 (no help text available)
213
215
214 options:
216 options:
215
217
216 -T --template TEMPLATE display with template
218 -T --template TEMPLATE display with template
217
219
218 (some details hidden, use --verbose to show complete help)
220 (some details hidden, use --verbose to show complete help)
219
221
220 $ hg perfaddremove
222 $ hg perfaddremove
221 $ hg perfancestors
223 $ hg perfancestors
222 $ hg perfancestorset 2
224 $ hg perfancestorset 2
223 $ hg perfannotate a
225 $ hg perfannotate a
224 $ hg perfbdiff -c 1
226 $ hg perfbdiff -c 1
225 $ hg perfbdiff --alldata 1
227 $ hg perfbdiff --alldata 1
226 $ hg perfunidiff -c 1
228 $ hg perfunidiff -c 1
227 $ hg perfunidiff --alldata 1
229 $ hg perfunidiff --alldata 1
228 $ hg perfbookmarks
230 $ hg perfbookmarks
229 $ hg perfbranchmap
231 $ hg perfbranchmap
230 $ hg perfbranchmapload
232 $ hg perfbranchmapload
231 $ hg perfbranchmapupdate --base "not tip" --target "tip"
233 $ hg perfbranchmapupdate --base "not tip" --target "tip"
232 benchmark of branchmap with 3 revisions with 1 new ones
234 benchmark of branchmap with 3 revisions with 1 new ones
233 $ hg perfcca
235 $ hg perfcca
234 $ hg perfchangegroupchangelog
236 $ hg perfchangegroupchangelog
235 $ hg perfchangegroupchangelog --cgversion 01
237 $ hg perfchangegroupchangelog --cgversion 01
236 $ hg perfchangeset 2
238 $ hg perfchangeset 2
237 $ hg perfctxfiles 2
239 $ hg perfctxfiles 2
238 $ hg perfdiffwd
240 $ hg perfdiffwd
239 $ hg perfdirfoldmap
241 $ hg perfdirfoldmap
240 $ hg perfdirs
242 $ hg perfdirs
241 $ hg perfdirstate
243 $ hg perfdirstate
242 $ hg perfdirstate --contains
244 $ hg perfdirstate --contains
243 $ hg perfdirstate --iteration
245 $ hg perfdirstate --iteration
244 $ hg perfdirstatedirs
246 $ hg perfdirstatedirs
245 $ hg perfdirstatefoldmap
247 $ hg perfdirstatefoldmap
246 $ hg perfdirstatewrite
248 $ hg perfdirstatewrite
247 #if repofncache
249 #if repofncache
248 $ hg perffncacheencode
250 $ hg perffncacheencode
249 $ hg perffncacheload
251 $ hg perffncacheload
250 $ hg debugrebuildfncache
252 $ hg debugrebuildfncache
251 fncache already up to date
253 fncache already up to date
252 $ hg perffncachewrite
254 $ hg perffncachewrite
253 $ hg debugrebuildfncache
255 $ hg debugrebuildfncache
254 fncache already up to date
256 fncache already up to date
255 #endif
257 #endif
256 $ hg perfheads
258 $ hg perfheads
257 $ hg perfignore
259 $ hg perfignore
258 $ hg perfindex
260 $ hg perfindex
259 $ hg perflinelogedits -n 1
261 $ hg perflinelogedits -n 1
260 $ hg perfloadmarkers
262 $ hg perfloadmarkers
261 $ hg perflog
263 $ hg perflog
262 $ hg perflookup 2
264 $ hg perflookup 2
263 $ hg perflrucache
265 $ hg perflrucache
264 $ hg perfmanifest 2
266 $ hg perfmanifest 2
265 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
267 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
266 $ hg perfmanifest -m 44fe2c8352bb
268 $ hg perfmanifest -m 44fe2c8352bb
267 abort: manifest revision must be integer or full node
269 abort: manifest revision must be integer or full node
268 [255]
270 [255]
269 $ hg perfmergecalculate -r 3
271 $ hg perfmergecalculate -r 3
270 $ hg perfmoonwalk
272 $ hg perfmoonwalk
271 $ hg perfnodelookup 2
273 $ hg perfnodelookup 2
272 $ hg perfpathcopies 1 2
274 $ hg perfpathcopies 1 2
273 $ hg perfprogress --total 1000
275 $ hg perfprogress --total 1000
274 $ hg perfrawfiles 2
276 $ hg perfrawfiles 2
275 $ hg perfrevlogindex -c
277 $ hg perfrevlogindex -c
276 #if reporevlogstore
278 #if reporevlogstore
277 $ hg perfrevlogrevisions .hg/store/data/a.i
279 $ hg perfrevlogrevisions .hg/store/data/a.i
278 #endif
280 #endif
279 $ hg perfrevlogrevision -m 0
281 $ hg perfrevlogrevision -m 0
280 $ hg perfrevlogchunks -c
282 $ hg perfrevlogchunks -c
281 $ hg perfrevrange
283 $ hg perfrevrange
282 $ hg perfrevset 'all()'
284 $ hg perfrevset 'all()'
283 $ hg perfstartup
285 $ hg perfstartup
284 $ hg perfstatus
286 $ hg perfstatus
285 $ hg perfstatus --dirstate
287 $ hg perfstatus --dirstate
286 $ hg perftags
288 $ hg perftags
287 $ hg perftemplating
289 $ hg perftemplating
288 $ hg perfvolatilesets
290 $ hg perfvolatilesets
289 $ hg perfwalk
291 $ hg perfwalk
290 $ hg perfparents
292 $ hg perfparents
291 $ hg perfdiscovery -q .
293 $ hg perfdiscovery -q .
292
294
293 Test run control
295 Test run control
294 ----------------
296 ----------------
295
297
296 Simple single entry
298 Simple single entry
297
299
298 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
300 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
299 ! wall * comb * user * sys * (best of 15) (glob)
301 ! wall * comb * user * sys * (best of 15) (glob)
300
302
301 Multiple entries
303 Multiple entries
302
304
303 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
305 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
304 ! wall * comb * user * sys * (best of 5) (glob)
306 ! wall * comb * user * sys * (best of 5) (glob)
305
307
306 error cases are ignored
308 error cases are ignored
307
309
308 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
310 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
309 malformatted run limit entry, missing "-": 500
311 malformatted run limit entry, missing "-": 500
310 ! wall * comb * user * sys * (best of 5) (glob)
312 ! wall * comb * user * sys * (best of 5) (glob)
311 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
313 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
312 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
314 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
313 ! wall * comb * user * sys * (best of 5) (glob)
315 ! wall * comb * user * sys * (best of 5) (glob)
314 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
316 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
315 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
317 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
316 ! wall * comb * user * sys * (best of 5) (glob)
318 ! wall * comb * user * sys * (best of 5) (glob)
317
319
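The error messages above come from parsing each perf.run-limits entry as a <time>-<numberofrun> pair. A hypothetical parser sketch that reproduces those failure modes; the extension's actual code and message wording may differ slightly:

def parse_run_limits(value):
    """Parse '<time>-<numberofrun>' pairs, skipping malformed entries."""
    limits = []
    for entry in value.split(','):
        entry = entry.strip()
        if '-' not in entry:
            print('malformatted run limit entry, missing "-": %s' % entry)
            continue
        time_part, run_part = entry.split('-', 1)
        try:
            limits.append((float(time_part), int(run_part)))
        except ValueError as exc:
            print('malformatted run limit entry, %s: %s' % (exc, entry))
    return limits

assert parse_run_limits('500, 0.000000001-5') == [(1e-09, 5)]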
318 test actual output
320 test actual output
319 ------------------
321 ------------------
320
322
321 normal output:
323 normal output:
322
324
323 $ hg perfheads --config perf.stub=no
325 $ hg perfheads --config perf.stub=no
324 ! wall * comb * user * sys * (best of *) (glob)
326 ! wall * comb * user * sys * (best of *) (glob)
325
327
326 detailed output:
328 detailed output:
327
329
328 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
330 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
329 ! wall * comb * user * sys * (best of *) (glob)
331 ! wall * comb * user * sys * (best of *) (glob)
330 ! wall * comb * user * sys * (max of *) (glob)
332 ! wall * comb * user * sys * (max of *) (glob)
331 ! wall * comb * user * sys * (avg of *) (glob)
333 ! wall * comb * user * sys * (avg of *) (glob)
332 ! wall * comb * user * sys * (median of *) (glob)
334 ! wall * comb * user * sys * (median of *) (glob)
333
335
334 test json output
336 test json output
335 ----------------
337 ----------------
336
338
337 normal output:
339 normal output:
338
340
339 $ hg perfheads --template json --config perf.stub=no
341 $ hg perfheads --template json --config perf.stub=no
340 [
342 [
341 {
343 {
342 "comb": *, (glob)
344 "comb": *, (glob)
343 "count": *, (glob)
345 "count": *, (glob)
344 "sys": *, (glob)
346 "sys": *, (glob)
345 "user": *, (glob)
347 "user": *, (glob)
346 "wall": * (glob)
348 "wall": * (glob)
347 }
349 }
348 ]
350 ]
349
351
350 detailed output:
352 detailed output:
351
353
352 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
354 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
353 [
355 [
354 {
356 {
355 "avg.comb": *, (glob)
357 "avg.comb": *, (glob)
356 "avg.count": *, (glob)
358 "avg.count": *, (glob)
357 "avg.sys": *, (glob)
359 "avg.sys": *, (glob)
358 "avg.user": *, (glob)
360 "avg.user": *, (glob)
359 "avg.wall": *, (glob)
361 "avg.wall": *, (glob)
360 "comb": *, (glob)
362 "comb": *, (glob)
361 "count": *, (glob)
363 "count": *, (glob)
362 "max.comb": *, (glob)
364 "max.comb": *, (glob)
363 "max.count": *, (glob)
365 "max.count": *, (glob)
364 "max.sys": *, (glob)
366 "max.sys": *, (glob)
365 "max.user": *, (glob)
367 "max.user": *, (glob)
366 "max.wall": *, (glob)
368 "max.wall": *, (glob)
367 "median.comb": *, (glob)
369 "median.comb": *, (glob)
368 "median.count": *, (glob)
370 "median.count": *, (glob)
369 "median.sys": *, (glob)
371 "median.sys": *, (glob)
370 "median.user": *, (glob)
372 "median.user": *, (glob)
371 "median.wall": *, (glob)
373 "median.wall": *, (glob)
372 "sys": *, (glob)
374 "sys": *, (glob)
373 "user": *, (glob)
375 "user": *, (glob)
374 "wall": * (glob)
376 "wall": * (glob)
375 }
377 }
376 ]
378 ]
377
379
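Because --template json emits a plain JSON list, the timings shown above are easy to post-process. A short sketch; the hg invocation is only an example and assumes it is run inside a repository with the perf extension enabled:

import json
import subprocess

# example invocation; adjust the command and --config flags to your setup
proc = subprocess.run(
    ['hg', 'perfheads', '--template', 'json', '--config', 'perf.stub=no'],
    check=True, capture_output=True, text=True,
)
for entry in json.loads(proc.stdout):
    print('%d runs, best wall time %.6fs' % (entry['count'], entry['wall']))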
378 Test pre-run feature
380 Test pre-run feature
379 --------------------
381 --------------------
380
382
381 (perf discovery has some spurious output)
383 (perf discovery has some spurious output)
382
384
383 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
385 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
384 ! wall * comb * user * sys * (best of 1) (glob)
386 ! wall * comb * user * sys * (best of 1) (glob)
385 searching for changes
387 searching for changes
386 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
388 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
387 ! wall * comb * user * sys * (best of 1) (glob)
389 ! wall * comb * user * sys * (best of 1) (glob)
388 searching for changes
390 searching for changes
389 searching for changes
391 searching for changes
390 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
392 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
391 ! wall * comb * user * sys * (best of 1) (glob)
393 ! wall * comb * user * sys * (best of 1) (glob)
392 searching for changes
394 searching for changes
393 searching for changes
395 searching for changes
394 searching for changes
396 searching for changes
395 searching for changes
397 searching for changes
396 $ hg perf::bundle 'last(all(), 5)'
398 $ hg perf::bundle 'last(all(), 5)'
397 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
399 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
398 4 changesets found
400 4 changesets found
399 $ hg perf::unbundle last-5.hg
401 $ hg perf::unbundle last-5.hg
400
402
401
403
402 test profile-benchmark option
404 test profile-benchmark option
403 ------------------------------
405 ------------------------------
404
406
405 Function to check that statprof ran
407 Function to check that statprof ran
406 $ statprofran () {
408 $ statprofran () {
407 > egrep 'Sample count:|No samples recorded' > /dev/null
409 > egrep 'Sample count:|No samples recorded' > /dev/null
408 > }
410 > }
409 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
411 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
410
412
411 Check perf.py for historical portability
413 Check perf.py for historical portability
412 ----------------------------------------
414 ----------------------------------------
413
415
414 $ cd "$TESTDIR/.."
416 $ cd "$TESTDIR/.."
415
417
416 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
418 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
417 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
419 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
418 > "$TESTDIR"/check-perf-code.py contrib/perf.py
420 > "$TESTDIR"/check-perf-code.py contrib/perf.py
419 contrib/perf.py:\d+: (re)
421 contrib/perf.py:\d+: (re)
420 > from mercurial import (
422 > from mercurial import (
421 import newer module separately in try clause for early Mercurial
423 import newer module separately in try clause for early Mercurial
422 contrib/perf.py:\d+: (re)
424 contrib/perf.py:\d+: (re)
423 > from mercurial import (
425 > from mercurial import (
424 import newer module separately in try clause for early Mercurial
426 import newer module separately in try clause for early Mercurial
425 contrib/perf.py:\d+: (re)
427 contrib/perf.py:\d+: (re)
426 > origindexpath = orig.opener.join(indexfile)
428 > origindexpath = orig.opener.join(indexfile)
427 use getvfs()/getsvfs() for early Mercurial
429 use getvfs()/getsvfs() for early Mercurial
428 contrib/perf.py:\d+: (re)
430 contrib/perf.py:\d+: (re)
429 > origdatapath = orig.opener.join(datafile)
431 > origdatapath = orig.opener.join(datafile)
430 use getvfs()/getsvfs() for early Mercurial
432 use getvfs()/getsvfs() for early Mercurial
431 contrib/perf.py:\d+: (re)
433 contrib/perf.py:\d+: (re)
432 > vfs = vfsmod.vfs(tmpdir)
434 > vfs = vfsmod.vfs(tmpdir)
433 use getvfs()/getsvfs() for early Mercurial
435 use getvfs()/getsvfs() for early Mercurial
434 contrib/perf.py:\d+: (re)
436 contrib/perf.py:\d+: (re)
435 > vfs.options = getattr(orig.opener, 'options', None)
437 > vfs.options = getattr(orig.opener, 'options', None)
436 use getvfs()/getsvfs() for early Mercurial
438 use getvfs()/getsvfs() for early Mercurial
437 [1]
439 [1]