##// END OF EJS Templates
perf: introduce a `perf::stream-locked-section` command...
marmoute -
r51344:714b63a7 default
parent child Browse files
Show More
@@ -1,4245 +1,4296 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
# for "historical portability":
# wrap revlog construction so callers need not know whether this
# Mercurial requires the revlog "kind" argument (newer API) or not.
try:
    from mercurial.revlogutils import constants as revlog_constants

    # tag revlogs created by this extension so they are identifiable
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        # newer API: a (kind, name) tuple is a mandatory second argument
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        # older API: no "kind" argument exists
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged.

    Used as a stand-in for pycompat helpers (byteskwargs, fsencode, ...)
    on Mercurial versions where those helpers do not exist.
    """
    value = a
    return value
140
140
141
141
# for "historical portability":
# alias pycompat helpers locally, falling back to py2-only equivalents
# when pycompat (or one of its attributes) is unavailable.
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # py2-only fallbacks: `xrange` and `sys.maxint` do not exist on py3,
    # but this branch is only reached with pre-pycompat (py2) Mercurial.
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue
175
175
# for "historical portability":
# locate the log templater factory wherever this Mercurial provides it;
# None when no version of the API exists.
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
185
185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel; distinguishes "missing" from None


def safehasattr(thing, attr):
    # attr arrives as bytes; _sysstr converts it for getattr
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
197
197
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a str on py3, so this bytes comparison can
    # never match there; the branch is effectively py2-Windows-only --
    # confirm intentional before changing (time.clock was removed in
    # py3.8, and perf_counter is preferred above when available).
    util.timer = time.clock
else:
    util.timer = time.time
207
207
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table populated by the @command decorator defined below
cmdtable = {}
237
237
238
238
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command spec into a list of names."""
    names = cmd.split(b"|")
    return names
244
244
245
245
# pick the most capable @command decorator this Mercurial offers,
# wrapping or re-implementing it where features are missing.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # cmdtable entries are (func, options[, synopsis]) tuples
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
277
278
278
# register the perf.* config items so devel warnings about unregistered
# config access are avoided on Mercurial versions that support registrar.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar/configitems missing entirely: old Mercurial, skip
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # configitem() there rejects the experimental= keyword, so
    # re-register everything without it (configitem is already bound).
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365
365
366
366
def getlen(ui):
    """Return a length function, honoring the perf.stub config knob.

    With perf.stub set, every collection is reported as one element long
    so benchmarks complete quickly (e.g. in the test suite); otherwise
    the builtin len is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if stubbed:
        return lambda seq: 1
    return len
371
371
372
372
class noop:
    """dummy context manager

    Stands in for a real profiler context when profiling is disabled,
    so callers can unconditionally write ``with profiler:``.
    """

    def __enter__(self):
        # nothing to acquire
        return None

    def __exit__(self, *args):
        # returning None (falsy) never suppresses exceptions
        return None


# shared reusable instance; the class is stateless
NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # falsy, like a plain (non-templated) formatter
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b"<seconds>-<minruncount>"; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, with no timing (perf.stub mode).

    *fm* and *title* are accepted only for interface compatibility with
    _timer; they are unused here.
    """
    if setup is None:
        func()
        return
    setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Time the enclosed block once.

    Yields a list that, after the block exits, holds one
    (wall, user, sys) tuple of elapsed seconds.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times()[0]/[1] are user and system CPU time respectively
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526
526
527
527
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* (after optional *setup*) and report timings.

    Runs until one of *limits* -- (elapsed seconds, minimum run count)
    pairs -- is satisfied, then formats the results through *fm*.
    Only the first measured iteration is profiled.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations, not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
574
574
575
575
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Render one benchmark's timings through the formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  Only the best run is shown unless *displayall* is true, in
    which case max, average and median rows are emitted as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # every row except the best one prefixes its fields with the role
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    rows = [(b'best', timings[0])]
    if displayall:
        rows.append((b'max', timings[-1]))
        rows.append((b'avg', tuple(sum(col) / count for col in zip(*timings))))
        rows.append((b'median', timings[count // 2]))
    for role, entry in rows:
        display(role, entry)
608
608
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
def getint(ui, section, name, default):
    """Read config option ``section.name`` as an integer.

    Returns `default` when the option is unset; raises ConfigError when the
    configured value does not parse as an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )
625
625
626
626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663
663
664
664
# utilities to examine internal API changes
666
666
667
667
def getbranchmapsubsettable():
    """Return the `subsettable` mapping used by branchmap computations.

    Aborts when no module provides it (very old Mercurial).
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686
686
687
687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # older Mercurial exposed the store opener as `sopener`
        return getattr(repo, 'sopener')
697
697
698
698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # older Mercurial exposed the repo opener as `opener`
        return getattr(repo, 'opener')
708
708
709
709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
# utilities to clear cache
741
741
742
742
def clearfilecache(obj, attrname):
    """Drop the @filecache'd attribute `attrname` from `obj`.

    Removes both the materialized attribute (so the property is recomputed
    on next access) and its `_filecache` bookkeeping entry. Works on the
    unfiltered repo when `obj` is a repoview.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750
750
751
751
def clearchangelog(repo):
    """Drop the cached changelog so the next access reloads it from disk."""
    if repo is not repo.unfiltered():
        # repoview caches the filtered changelog separately; reset it too
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757
757
758
758
# perf commands
760
760
761
761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775
775
776
776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file `f` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784
784
785
785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # force evaluation of the whole result without keeping it around
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                # prevent the context from writing anything back on exit
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
827
827
828
828
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # grab the original value *before* entering the try block: if it were
    # assigned inside and an earlier statement failed, the finally clause
    # would hit an unbound name instead of restoring quietness
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # for "historical portability": addremove grew a `uipathfn`
        # argument; detect which signature this Mercurial uses
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
846
846
847
847
def clearcaches(cl):
    """Clear the lookup caches of changelog/revlog `cl`, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
858
858
859
859
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # start each run from cold changelog caches
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
875
875
876
876
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # start every run with a cold tags cache, and optionally with
        # cold changelog/manifest caches as well
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
901
901
902
902
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all the ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
915
915
916
916
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark lazy ancestor-set membership tests for a revset"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test is the benchmarked operation; result unused
            rev in s

    timer(d)
    fm.end()
931
931
932
932
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with a single positional argument the revision applies to the
    # changelog/manifest selected through `revlogopts`; with two, the
    # first names the file whose revlog should be opened
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo a delta search would receive for this revision
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
996
996
997
997
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # for "historical portability": resolve the path with whichever API
    # this Mercurial version provides, newest first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # a fresh peer for every run, so connection state is not reused
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1024
1024
1025
1025
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # drop the cached bookmark store (and optionally the revlogs)
        # so each run parses from disk again
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1050
1050
1051
1051
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # for "historical portability": parsebundlespec moved from exchange
    # to bundlecaches
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        # infer the changegroup version from the bundle format version
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only bundle *generation* is being measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1155
1155
1156
1156
1157 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1157 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1158 def perfbundleread(ui, repo, bundlepath, **opts):
1158 def perfbundleread(ui, repo, bundlepath, **opts):
1159 """Benchmark reading of bundle files.
1159 """Benchmark reading of bundle files.
1160
1160
1161 This command is meant to isolate the I/O part of bundle reading as
1161 This command is meant to isolate the I/O part of bundle reading as
1162 much as possible.
1162 much as possible.
1163 """
1163 """
1164 from mercurial import (
1164 from mercurial import (
1165 bundle2,
1165 bundle2,
1166 exchange,
1166 exchange,
1167 streamclone,
1167 streamclone,
1168 )
1168 )
1169
1169
1170 opts = _byteskwargs(opts)
1170 opts = _byteskwargs(opts)
1171
1171
1172 def makebench(fn):
1172 def makebench(fn):
1173 def run():
1173 def run():
1174 with open(bundlepath, b'rb') as fh:
1174 with open(bundlepath, b'rb') as fh:
1175 bundle = exchange.readbundle(ui, fh, bundlepath)
1175 bundle = exchange.readbundle(ui, fh, bundlepath)
1176 fn(bundle)
1176 fn(bundle)
1177
1177
1178 return run
1178 return run
1179
1179
1180 def makereadnbytes(size):
1180 def makereadnbytes(size):
1181 def run():
1181 def run():
1182 with open(bundlepath, b'rb') as fh:
1182 with open(bundlepath, b'rb') as fh:
1183 bundle = exchange.readbundle(ui, fh, bundlepath)
1183 bundle = exchange.readbundle(ui, fh, bundlepath)
1184 while bundle.read(size):
1184 while bundle.read(size):
1185 pass
1185 pass
1186
1186
1187 return run
1187 return run
1188
1188
1189 def makestdioread(size):
1189 def makestdioread(size):
1190 def run():
1190 def run():
1191 with open(bundlepath, b'rb') as fh:
1191 with open(bundlepath, b'rb') as fh:
1192 while fh.read(size):
1192 while fh.read(size):
1193 pass
1193 pass
1194
1194
1195 return run
1195 return run
1196
1196
1197 # bundle1
1197 # bundle1
1198
1198
1199 def deltaiter(bundle):
1199 def deltaiter(bundle):
1200 for delta in bundle.deltaiter():
1200 for delta in bundle.deltaiter():
1201 pass
1201 pass
1202
1202
1203 def iterchunks(bundle):
1203 def iterchunks(bundle):
1204 for chunk in bundle.getchunks():
1204 for chunk in bundle.getchunks():
1205 pass
1205 pass
1206
1206
1207 # bundle2
1207 # bundle2
1208
1208
1209 def forwardchunks(bundle):
1209 def forwardchunks(bundle):
1210 for chunk in bundle._forwardchunks():
1210 for chunk in bundle._forwardchunks():
1211 pass
1211 pass
1212
1212
1213 def iterparts(bundle):
1213 def iterparts(bundle):
1214 for part in bundle.iterparts():
1214 for part in bundle.iterparts():
1215 pass
1215 pass
1216
1216
1217 def iterpartsseekable(bundle):
1217 def iterpartsseekable(bundle):
1218 for part in bundle.iterparts(seekable=True):
1218 for part in bundle.iterparts(seekable=True):
1219 pass
1219 pass
1220
1220
1221 def seek(bundle):
1221 def seek(bundle):
1222 for part in bundle.iterparts(seekable=True):
1222 for part in bundle.iterparts(seekable=True):
1223 part.seek(0, os.SEEK_END)
1223 part.seek(0, os.SEEK_END)
1224
1224
1225 def makepartreadnbytes(size):
1225 def makepartreadnbytes(size):
1226 def run():
1226 def run():
1227 with open(bundlepath, b'rb') as fh:
1227 with open(bundlepath, b'rb') as fh:
1228 bundle = exchange.readbundle(ui, fh, bundlepath)
1228 bundle = exchange.readbundle(ui, fh, bundlepath)
1229 for part in bundle.iterparts():
1229 for part in bundle.iterparts():
1230 while part.read(size):
1230 while part.read(size):
1231 pass
1231 pass
1232
1232
1233 return run
1233 return run
1234
1234
1235 benches = [
1235 benches = [
1236 (makestdioread(8192), b'read(8k)'),
1236 (makestdioread(8192), b'read(8k)'),
1237 (makestdioread(16384), b'read(16k)'),
1237 (makestdioread(16384), b'read(16k)'),
1238 (makestdioread(32768), b'read(32k)'),
1238 (makestdioread(32768), b'read(32k)'),
1239 (makestdioread(131072), b'read(128k)'),
1239 (makestdioread(131072), b'read(128k)'),
1240 ]
1240 ]
1241
1241
1242 with open(bundlepath, b'rb') as fh:
1242 with open(bundlepath, b'rb') as fh:
1243 bundle = exchange.readbundle(ui, fh, bundlepath)
1243 bundle = exchange.readbundle(ui, fh, bundlepath)
1244
1244
1245 if isinstance(bundle, changegroup.cg1unpacker):
1245 if isinstance(bundle, changegroup.cg1unpacker):
1246 benches.extend(
1246 benches.extend(
1247 [
1247 [
1248 (makebench(deltaiter), b'cg1 deltaiter()'),
1248 (makebench(deltaiter), b'cg1 deltaiter()'),
1249 (makebench(iterchunks), b'cg1 getchunks()'),
1249 (makebench(iterchunks), b'cg1 getchunks()'),
1250 (makereadnbytes(8192), b'cg1 read(8k)'),
1250 (makereadnbytes(8192), b'cg1 read(8k)'),
1251 (makereadnbytes(16384), b'cg1 read(16k)'),
1251 (makereadnbytes(16384), b'cg1 read(16k)'),
1252 (makereadnbytes(32768), b'cg1 read(32k)'),
1252 (makereadnbytes(32768), b'cg1 read(32k)'),
1253 (makereadnbytes(131072), b'cg1 read(128k)'),
1253 (makereadnbytes(131072), b'cg1 read(128k)'),
1254 ]
1254 ]
1255 )
1255 )
1256 elif isinstance(bundle, bundle2.unbundle20):
1256 elif isinstance(bundle, bundle2.unbundle20):
1257 benches.extend(
1257 benches.extend(
1258 [
1258 [
1259 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1259 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1260 (makebench(iterparts), b'bundle2 iterparts()'),
1260 (makebench(iterparts), b'bundle2 iterparts()'),
1261 (
1261 (
1262 makebench(iterpartsseekable),
1262 makebench(iterpartsseekable),
1263 b'bundle2 iterparts() seekable',
1263 b'bundle2 iterparts() seekable',
1264 ),
1264 ),
1265 (makebench(seek), b'bundle2 part seek()'),
1265 (makebench(seek), b'bundle2 part seek()'),
1266 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1266 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1267 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1267 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1268 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1268 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1269 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1269 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1270 ]
1270 ]
1271 )
1271 )
1272 elif isinstance(bundle, streamclone.streamcloneapplier):
1272 elif isinstance(bundle, streamclone.streamcloneapplier):
1273 raise error.Abort(b'stream clone bundles not supported')
1273 raise error.Abort(b'stream clone bundles not supported')
1274 else:
1274 else:
1275 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1275 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1276
1276
1277 for fn, title in benches:
1277 for fn, title in benches:
1278 timer, fm = gettimer(ui, opts)
1278 timer, fm = gettimer(ui, opts)
1279 timer(fn, title=title)
1279 timer(fn, title=title)
1280 fm.end()
1280 fm.end()
1281
1281
1282
1282
1283 @command(
1283 @command(
1284 b'perf::changegroupchangelog|perfchangegroupchangelog',
1284 b'perf::changegroupchangelog|perfchangegroupchangelog',
1285 formatteropts
1285 formatteropts
1286 + [
1286 + [
1287 (b'', b'cgversion', b'02', b'changegroup version'),
1287 (b'', b'cgversion', b'02', b'changegroup version'),
1288 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1288 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1289 ],
1289 ],
1290 )
1290 )
1291 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1291 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1292 """Benchmark producing a changelog group for a changegroup.
1292 """Benchmark producing a changelog group for a changegroup.
1293
1293
1294 This measures the time spent processing the changelog during a
1294 This measures the time spent processing the changelog during a
1295 bundle operation. This occurs during `hg bundle` and on a server
1295 bundle operation. This occurs during `hg bundle` and on a server
1296 processing a `getbundle` wire protocol request (handles clones
1296 processing a `getbundle` wire protocol request (handles clones
1297 and pull requests).
1297 and pull requests).
1298
1298
1299 By default, all revisions are added to the changegroup.
1299 By default, all revisions are added to the changegroup.
1300 """
1300 """
1301 opts = _byteskwargs(opts)
1301 opts = _byteskwargs(opts)
1302 cl = repo.changelog
1302 cl = repo.changelog
1303 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1303 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1304 bundler = changegroup.getbundler(cgversion, repo)
1304 bundler = changegroup.getbundler(cgversion, repo)
1305
1305
1306 def d():
1306 def d():
1307 state, chunks = bundler._generatechangelog(cl, nodes)
1307 state, chunks = bundler._generatechangelog(cl, nodes)
1308 for chunk in chunks:
1308 for chunk in chunks:
1309 pass
1309 pass
1310
1310
1311 timer, fm = gettimer(ui, opts)
1311 timer, fm = gettimer(ui, opts)
1312
1312
1313 # Terminal printing can interfere with timing. So disable it.
1313 # Terminal printing can interfere with timing. So disable it.
1314 with ui.configoverride({(b'progress', b'disable'): True}):
1314 with ui.configoverride({(b'progress', b'disable'): True}):
1315 timer(d)
1315 timer(d)
1316
1316
1317 fm.end()
1317 fm.end()
1318
1318
1319
1319
1320 @command(b'perf::dirs|perfdirs', formatteropts)
1320 @command(b'perf::dirs|perfdirs', formatteropts)
1321 def perfdirs(ui, repo, **opts):
1321 def perfdirs(ui, repo, **opts):
1322 opts = _byteskwargs(opts)
1322 opts = _byteskwargs(opts)
1323 timer, fm = gettimer(ui, opts)
1323 timer, fm = gettimer(ui, opts)
1324 dirstate = repo.dirstate
1324 dirstate = repo.dirstate
1325 b'a' in dirstate
1325 b'a' in dirstate
1326
1326
1327 def d():
1327 def d():
1328 dirstate.hasdir(b'a')
1328 dirstate.hasdir(b'a')
1329 try:
1329 try:
1330 del dirstate._map._dirs
1330 del dirstate._map._dirs
1331 except AttributeError:
1331 except AttributeError:
1332 pass
1332 pass
1333
1333
1334 timer(d)
1334 timer(d)
1335 fm.end()
1335 fm.end()
1336
1336
1337
1337
1338 @command(
1338 @command(
1339 b'perf::dirstate|perfdirstate',
1339 b'perf::dirstate|perfdirstate',
1340 [
1340 [
1341 (
1341 (
1342 b'',
1342 b'',
1343 b'iteration',
1343 b'iteration',
1344 None,
1344 None,
1345 b'benchmark a full iteration for the dirstate',
1345 b'benchmark a full iteration for the dirstate',
1346 ),
1346 ),
1347 (
1347 (
1348 b'',
1348 b'',
1349 b'contains',
1349 b'contains',
1350 None,
1350 None,
1351 b'benchmark a large amount of `nf in dirstate` calls',
1351 b'benchmark a large amount of `nf in dirstate` calls',
1352 ),
1352 ),
1353 ]
1353 ]
1354 + formatteropts,
1354 + formatteropts,
1355 )
1355 )
1356 def perfdirstate(ui, repo, **opts):
1356 def perfdirstate(ui, repo, **opts):
1357 """benchmap the time of various distate operations
1357 """benchmap the time of various distate operations
1358
1358
1359 By default benchmark the time necessary to load a dirstate from scratch.
1359 By default benchmark the time necessary to load a dirstate from scratch.
1360 The dirstate is loaded to the point were a "contains" request can be
1360 The dirstate is loaded to the point were a "contains" request can be
1361 answered.
1361 answered.
1362 """
1362 """
1363 opts = _byteskwargs(opts)
1363 opts = _byteskwargs(opts)
1364 timer, fm = gettimer(ui, opts)
1364 timer, fm = gettimer(ui, opts)
1365 b"a" in repo.dirstate
1365 b"a" in repo.dirstate
1366
1366
1367 if opts[b'iteration'] and opts[b'contains']:
1367 if opts[b'iteration'] and opts[b'contains']:
1368 msg = b'only specify one of --iteration or --contains'
1368 msg = b'only specify one of --iteration or --contains'
1369 raise error.Abort(msg)
1369 raise error.Abort(msg)
1370
1370
1371 if opts[b'iteration']:
1371 if opts[b'iteration']:
1372 setup = None
1372 setup = None
1373 dirstate = repo.dirstate
1373 dirstate = repo.dirstate
1374
1374
1375 def d():
1375 def d():
1376 for f in dirstate:
1376 for f in dirstate:
1377 pass
1377 pass
1378
1378
1379 elif opts[b'contains']:
1379 elif opts[b'contains']:
1380 setup = None
1380 setup = None
1381 dirstate = repo.dirstate
1381 dirstate = repo.dirstate
1382 allfiles = list(dirstate)
1382 allfiles = list(dirstate)
1383 # also add file path that will be "missing" from the dirstate
1383 # also add file path that will be "missing" from the dirstate
1384 allfiles.extend([f[::-1] for f in allfiles])
1384 allfiles.extend([f[::-1] for f in allfiles])
1385
1385
1386 def d():
1386 def d():
1387 for f in allfiles:
1387 for f in allfiles:
1388 f in dirstate
1388 f in dirstate
1389
1389
1390 else:
1390 else:
1391
1391
1392 def setup():
1392 def setup():
1393 repo.dirstate.invalidate()
1393 repo.dirstate.invalidate()
1394
1394
1395 def d():
1395 def d():
1396 b"a" in repo.dirstate
1396 b"a" in repo.dirstate
1397
1397
1398 timer(d, setup=setup)
1398 timer(d, setup=setup)
1399 fm.end()
1399 fm.end()
1400
1400
1401
1401
1402 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1402 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1403 def perfdirstatedirs(ui, repo, **opts):
1403 def perfdirstatedirs(ui, repo, **opts):
1404 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1404 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1405 opts = _byteskwargs(opts)
1405 opts = _byteskwargs(opts)
1406 timer, fm = gettimer(ui, opts)
1406 timer, fm = gettimer(ui, opts)
1407 repo.dirstate.hasdir(b"a")
1407 repo.dirstate.hasdir(b"a")
1408
1408
1409 def setup():
1409 def setup():
1410 try:
1410 try:
1411 del repo.dirstate._map._dirs
1411 del repo.dirstate._map._dirs
1412 except AttributeError:
1412 except AttributeError:
1413 pass
1413 pass
1414
1414
1415 def d():
1415 def d():
1416 repo.dirstate.hasdir(b"a")
1416 repo.dirstate.hasdir(b"a")
1417
1417
1418 timer(d, setup=setup)
1418 timer(d, setup=setup)
1419 fm.end()
1419 fm.end()
1420
1420
1421
1421
1422 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1422 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1423 def perfdirstatefoldmap(ui, repo, **opts):
1423 def perfdirstatefoldmap(ui, repo, **opts):
1424 """benchmap a `dirstate._map.filefoldmap.get()` request
1424 """benchmap a `dirstate._map.filefoldmap.get()` request
1425
1425
1426 The dirstate filefoldmap cache is dropped between every request.
1426 The dirstate filefoldmap cache is dropped between every request.
1427 """
1427 """
1428 opts = _byteskwargs(opts)
1428 opts = _byteskwargs(opts)
1429 timer, fm = gettimer(ui, opts)
1429 timer, fm = gettimer(ui, opts)
1430 dirstate = repo.dirstate
1430 dirstate = repo.dirstate
1431 dirstate._map.filefoldmap.get(b'a')
1431 dirstate._map.filefoldmap.get(b'a')
1432
1432
1433 def setup():
1433 def setup():
1434 del dirstate._map.filefoldmap
1434 del dirstate._map.filefoldmap
1435
1435
1436 def d():
1436 def d():
1437 dirstate._map.filefoldmap.get(b'a')
1437 dirstate._map.filefoldmap.get(b'a')
1438
1438
1439 timer(d, setup=setup)
1439 timer(d, setup=setup)
1440 fm.end()
1440 fm.end()
1441
1441
1442
1442
1443 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1443 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1444 def perfdirfoldmap(ui, repo, **opts):
1444 def perfdirfoldmap(ui, repo, **opts):
1445 """benchmap a `dirstate._map.dirfoldmap.get()` request
1445 """benchmap a `dirstate._map.dirfoldmap.get()` request
1446
1446
1447 The dirstate dirfoldmap cache is dropped between every request.
1447 The dirstate dirfoldmap cache is dropped between every request.
1448 """
1448 """
1449 opts = _byteskwargs(opts)
1449 opts = _byteskwargs(opts)
1450 timer, fm = gettimer(ui, opts)
1450 timer, fm = gettimer(ui, opts)
1451 dirstate = repo.dirstate
1451 dirstate = repo.dirstate
1452 dirstate._map.dirfoldmap.get(b'a')
1452 dirstate._map.dirfoldmap.get(b'a')
1453
1453
1454 def setup():
1454 def setup():
1455 del dirstate._map.dirfoldmap
1455 del dirstate._map.dirfoldmap
1456 try:
1456 try:
1457 del dirstate._map._dirs
1457 del dirstate._map._dirs
1458 except AttributeError:
1458 except AttributeError:
1459 pass
1459 pass
1460
1460
1461 def d():
1461 def d():
1462 dirstate._map.dirfoldmap.get(b'a')
1462 dirstate._map.dirfoldmap.get(b'a')
1463
1463
1464 timer(d, setup=setup)
1464 timer(d, setup=setup)
1465 fm.end()
1465 fm.end()
1466
1466
1467
1467
1468 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1468 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1469 def perfdirstatewrite(ui, repo, **opts):
1469 def perfdirstatewrite(ui, repo, **opts):
1470 """benchmap the time it take to write a dirstate on disk"""
1470 """benchmap the time it take to write a dirstate on disk"""
1471 opts = _byteskwargs(opts)
1471 opts = _byteskwargs(opts)
1472 timer, fm = gettimer(ui, opts)
1472 timer, fm = gettimer(ui, opts)
1473 ds = repo.dirstate
1473 ds = repo.dirstate
1474 b"a" in ds
1474 b"a" in ds
1475
1475
1476 def setup():
1476 def setup():
1477 ds._dirty = True
1477 ds._dirty = True
1478
1478
1479 def d():
1479 def d():
1480 ds.write(repo.currenttransaction())
1480 ds.write(repo.currenttransaction())
1481
1481
1482 with repo.wlock():
1482 with repo.wlock():
1483 timer(d, setup=setup)
1483 timer(d, setup=setup)
1484 fm.end()
1484 fm.end()
1485
1485
1486
1486
1487 def _getmergerevs(repo, opts):
1487 def _getmergerevs(repo, opts):
1488 """parse command argument to return rev involved in merge
1488 """parse command argument to return rev involved in merge
1489
1489
1490 input: options dictionnary with `rev`, `from` and `bse`
1490 input: options dictionnary with `rev`, `from` and `bse`
1491 output: (localctx, otherctx, basectx)
1491 output: (localctx, otherctx, basectx)
1492 """
1492 """
1493 if opts[b'from']:
1493 if opts[b'from']:
1494 fromrev = scmutil.revsingle(repo, opts[b'from'])
1494 fromrev = scmutil.revsingle(repo, opts[b'from'])
1495 wctx = repo[fromrev]
1495 wctx = repo[fromrev]
1496 else:
1496 else:
1497 wctx = repo[None]
1497 wctx = repo[None]
1498 # we don't want working dir files to be stat'd in the benchmark, so
1498 # we don't want working dir files to be stat'd in the benchmark, so
1499 # prime that cache
1499 # prime that cache
1500 wctx.dirty()
1500 wctx.dirty()
1501 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1501 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1502 if opts[b'base']:
1502 if opts[b'base']:
1503 fromrev = scmutil.revsingle(repo, opts[b'base'])
1503 fromrev = scmutil.revsingle(repo, opts[b'base'])
1504 ancestor = repo[fromrev]
1504 ancestor = repo[fromrev]
1505 else:
1505 else:
1506 ancestor = wctx.ancestor(rctx)
1506 ancestor = wctx.ancestor(rctx)
1507 return (wctx, rctx, ancestor)
1507 return (wctx, rctx, ancestor)
1508
1508
1509
1509
1510 @command(
1510 @command(
1511 b'perf::mergecalculate|perfmergecalculate',
1511 b'perf::mergecalculate|perfmergecalculate',
1512 [
1512 [
1513 (b'r', b'rev', b'.', b'rev to merge against'),
1513 (b'r', b'rev', b'.', b'rev to merge against'),
1514 (b'', b'from', b'', b'rev to merge from'),
1514 (b'', b'from', b'', b'rev to merge from'),
1515 (b'', b'base', b'', b'the revision to use as base'),
1515 (b'', b'base', b'', b'the revision to use as base'),
1516 ]
1516 ]
1517 + formatteropts,
1517 + formatteropts,
1518 )
1518 )
1519 def perfmergecalculate(ui, repo, **opts):
1519 def perfmergecalculate(ui, repo, **opts):
1520 opts = _byteskwargs(opts)
1520 opts = _byteskwargs(opts)
1521 timer, fm = gettimer(ui, opts)
1521 timer, fm = gettimer(ui, opts)
1522
1522
1523 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1523 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1524
1524
1525 def d():
1525 def d():
1526 # acceptremote is True because we don't want prompts in the middle of
1526 # acceptremote is True because we don't want prompts in the middle of
1527 # our benchmark
1527 # our benchmark
1528 merge.calculateupdates(
1528 merge.calculateupdates(
1529 repo,
1529 repo,
1530 wctx,
1530 wctx,
1531 rctx,
1531 rctx,
1532 [ancestor],
1532 [ancestor],
1533 branchmerge=False,
1533 branchmerge=False,
1534 force=False,
1534 force=False,
1535 acceptremote=True,
1535 acceptremote=True,
1536 followcopies=True,
1536 followcopies=True,
1537 )
1537 )
1538
1538
1539 timer(d)
1539 timer(d)
1540 fm.end()
1540 fm.end()
1541
1541
1542
1542
1543 @command(
1543 @command(
1544 b'perf::mergecopies|perfmergecopies',
1544 b'perf::mergecopies|perfmergecopies',
1545 [
1545 [
1546 (b'r', b'rev', b'.', b'rev to merge against'),
1546 (b'r', b'rev', b'.', b'rev to merge against'),
1547 (b'', b'from', b'', b'rev to merge from'),
1547 (b'', b'from', b'', b'rev to merge from'),
1548 (b'', b'base', b'', b'the revision to use as base'),
1548 (b'', b'base', b'', b'the revision to use as base'),
1549 ]
1549 ]
1550 + formatteropts,
1550 + formatteropts,
1551 )
1551 )
1552 def perfmergecopies(ui, repo, **opts):
1552 def perfmergecopies(ui, repo, **opts):
1553 """measure runtime of `copies.mergecopies`"""
1553 """measure runtime of `copies.mergecopies`"""
1554 opts = _byteskwargs(opts)
1554 opts = _byteskwargs(opts)
1555 timer, fm = gettimer(ui, opts)
1555 timer, fm = gettimer(ui, opts)
1556 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1556 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1557
1557
1558 def d():
1558 def d():
1559 # acceptremote is True because we don't want prompts in the middle of
1559 # acceptremote is True because we don't want prompts in the middle of
1560 # our benchmark
1560 # our benchmark
1561 copies.mergecopies(repo, wctx, rctx, ancestor)
1561 copies.mergecopies(repo, wctx, rctx, ancestor)
1562
1562
1563 timer(d)
1563 timer(d)
1564 fm.end()
1564 fm.end()
1565
1565
1566
1566
1567 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1567 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1568 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1568 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1569 """benchmark the copy tracing logic"""
1569 """benchmark the copy tracing logic"""
1570 opts = _byteskwargs(opts)
1570 opts = _byteskwargs(opts)
1571 timer, fm = gettimer(ui, opts)
1571 timer, fm = gettimer(ui, opts)
1572 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1572 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1573 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1573 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1574
1574
1575 def d():
1575 def d():
1576 copies.pathcopies(ctx1, ctx2)
1576 copies.pathcopies(ctx1, ctx2)
1577
1577
1578 timer(d)
1578 timer(d)
1579 fm.end()
1579 fm.end()
1580
1580
1581
1581
1582 @command(
1582 @command(
1583 b'perf::phases|perfphases',
1583 b'perf::phases|perfphases',
1584 [
1584 [
1585 (b'', b'full', False, b'include file reading time too'),
1585 (b'', b'full', False, b'include file reading time too'),
1586 ],
1586 ],
1587 b"",
1587 b"",
1588 )
1588 )
1589 def perfphases(ui, repo, **opts):
1589 def perfphases(ui, repo, **opts):
1590 """benchmark phasesets computation"""
1590 """benchmark phasesets computation"""
1591 opts = _byteskwargs(opts)
1591 opts = _byteskwargs(opts)
1592 timer, fm = gettimer(ui, opts)
1592 timer, fm = gettimer(ui, opts)
1593 _phases = repo._phasecache
1593 _phases = repo._phasecache
1594 full = opts.get(b'full')
1594 full = opts.get(b'full')
1595
1595
1596 def d():
1596 def d():
1597 phases = _phases
1597 phases = _phases
1598 if full:
1598 if full:
1599 clearfilecache(repo, b'_phasecache')
1599 clearfilecache(repo, b'_phasecache')
1600 phases = repo._phasecache
1600 phases = repo._phasecache
1601 phases.invalidate()
1601 phases.invalidate()
1602 phases.loadphaserevs(repo)
1602 phases.loadphaserevs(repo)
1603
1603
1604 timer(d)
1604 timer(d)
1605 fm.end()
1605 fm.end()
1606
1606
1607
1607
1608 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1608 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1609 def perfphasesremote(ui, repo, dest=None, **opts):
1609 def perfphasesremote(ui, repo, dest=None, **opts):
1610 """benchmark time needed to analyse phases of the remote server"""
1610 """benchmark time needed to analyse phases of the remote server"""
1611 from mercurial.node import bin
1611 from mercurial.node import bin
1612 from mercurial import (
1612 from mercurial import (
1613 exchange,
1613 exchange,
1614 hg,
1614 hg,
1615 phases,
1615 phases,
1616 )
1616 )
1617
1617
1618 opts = _byteskwargs(opts)
1618 opts = _byteskwargs(opts)
1619 timer, fm = gettimer(ui, opts)
1619 timer, fm = gettimer(ui, opts)
1620
1620
1621 path = ui.getpath(dest, default=(b'default-push', b'default'))
1621 path = ui.getpath(dest, default=(b'default-push', b'default'))
1622 if not path:
1622 if not path:
1623 raise error.Abort(
1623 raise error.Abort(
1624 b'default repository not configured!',
1624 b'default repository not configured!',
1625 hint=b"see 'hg help config.paths'",
1625 hint=b"see 'hg help config.paths'",
1626 )
1626 )
1627 if util.safehasattr(path, 'main_path'):
1627 if util.safehasattr(path, 'main_path'):
1628 path = path.get_push_variant()
1628 path = path.get_push_variant()
1629 dest = path.loc
1629 dest = path.loc
1630 else:
1630 else:
1631 dest = path.pushloc or path.loc
1631 dest = path.pushloc or path.loc
1632 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1632 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1633 other = hg.peer(repo, opts, dest)
1633 other = hg.peer(repo, opts, dest)
1634
1634
1635 # easier to perform discovery through the operation
1635 # easier to perform discovery through the operation
1636 op = exchange.pushoperation(repo, other)
1636 op = exchange.pushoperation(repo, other)
1637 exchange._pushdiscoverychangeset(op)
1637 exchange._pushdiscoverychangeset(op)
1638
1638
1639 remotesubset = op.fallbackheads
1639 remotesubset = op.fallbackheads
1640
1640
1641 with other.commandexecutor() as e:
1641 with other.commandexecutor() as e:
1642 remotephases = e.callcommand(
1642 remotephases = e.callcommand(
1643 b'listkeys', {b'namespace': b'phases'}
1643 b'listkeys', {b'namespace': b'phases'}
1644 ).result()
1644 ).result()
1645 del other
1645 del other
1646 publishing = remotephases.get(b'publishing', False)
1646 publishing = remotephases.get(b'publishing', False)
1647 if publishing:
1647 if publishing:
1648 ui.statusnoi18n(b'publishing: yes\n')
1648 ui.statusnoi18n(b'publishing: yes\n')
1649 else:
1649 else:
1650 ui.statusnoi18n(b'publishing: no\n')
1650 ui.statusnoi18n(b'publishing: no\n')
1651
1651
1652 has_node = getattr(repo.changelog.index, 'has_node', None)
1652 has_node = getattr(repo.changelog.index, 'has_node', None)
1653 if has_node is None:
1653 if has_node is None:
1654 has_node = repo.changelog.nodemap.__contains__
1654 has_node = repo.changelog.nodemap.__contains__
1655 nonpublishroots = 0
1655 nonpublishroots = 0
1656 for nhex, phase in remotephases.iteritems():
1656 for nhex, phase in remotephases.iteritems():
1657 if nhex == b'publishing': # ignore data related to publish option
1657 if nhex == b'publishing': # ignore data related to publish option
1658 continue
1658 continue
1659 node = bin(nhex)
1659 node = bin(nhex)
1660 if has_node(node) and int(phase):
1660 if has_node(node) and int(phase):
1661 nonpublishroots += 1
1661 nonpublishroots += 1
1662 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1662 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1663 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1663 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1664
1664
1665 def d():
1665 def d():
1666 phases.remotephasessummary(repo, remotesubset, remotephases)
1666 phases.remotephasessummary(repo, remotesubset, remotephases)
1667
1667
1668 timer(d)
1668 timer(d)
1669 fm.end()
1669 fm.end()
1670
1670
1671
1671
1672 @command(
1672 @command(
1673 b'perf::manifest|perfmanifest',
1673 b'perf::manifest|perfmanifest',
1674 [
1674 [
1675 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1675 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1676 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1676 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1677 ]
1677 ]
1678 + formatteropts,
1678 + formatteropts,
1679 b'REV|NODE',
1679 b'REV|NODE',
1680 )
1680 )
1681 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1681 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1682 """benchmark the time to read a manifest from disk and return a usable
1682 """benchmark the time to read a manifest from disk and return a usable
1683 dict-like object
1683 dict-like object
1684
1684
1685 Manifest caches are cleared before retrieval."""
1685 Manifest caches are cleared before retrieval."""
1686 opts = _byteskwargs(opts)
1686 opts = _byteskwargs(opts)
1687 timer, fm = gettimer(ui, opts)
1687 timer, fm = gettimer(ui, opts)
1688 if not manifest_rev:
1688 if not manifest_rev:
1689 ctx = scmutil.revsingle(repo, rev, rev)
1689 ctx = scmutil.revsingle(repo, rev, rev)
1690 t = ctx.manifestnode()
1690 t = ctx.manifestnode()
1691 else:
1691 else:
1692 from mercurial.node import bin
1692 from mercurial.node import bin
1693
1693
1694 if len(rev) == 40:
1694 if len(rev) == 40:
1695 t = bin(rev)
1695 t = bin(rev)
1696 else:
1696 else:
1697 try:
1697 try:
1698 rev = int(rev)
1698 rev = int(rev)
1699
1699
1700 if util.safehasattr(repo.manifestlog, b'getstorage'):
1700 if util.safehasattr(repo.manifestlog, b'getstorage'):
1701 t = repo.manifestlog.getstorage(b'').node(rev)
1701 t = repo.manifestlog.getstorage(b'').node(rev)
1702 else:
1702 else:
1703 t = repo.manifestlog._revlog.lookup(rev)
1703 t = repo.manifestlog._revlog.lookup(rev)
1704 except ValueError:
1704 except ValueError:
1705 raise error.Abort(
1705 raise error.Abort(
1706 b'manifest revision must be integer or full node'
1706 b'manifest revision must be integer or full node'
1707 )
1707 )
1708
1708
1709 def d():
1709 def d():
1710 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1710 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1711 repo.manifestlog[t].read()
1711 repo.manifestlog[t].read()
1712
1712
1713 timer(d)
1713 timer(d)
1714 fm.end()
1714 fm.end()
1715
1715
1716
1716
1717 @command(b'perf::changeset|perfchangeset', formatteropts)
1717 @command(b'perf::changeset|perfchangeset', formatteropts)
1718 def perfchangeset(ui, repo, rev, **opts):
1718 def perfchangeset(ui, repo, rev, **opts):
1719 opts = _byteskwargs(opts)
1719 opts = _byteskwargs(opts)
1720 timer, fm = gettimer(ui, opts)
1720 timer, fm = gettimer(ui, opts)
1721 n = scmutil.revsingle(repo, rev).node()
1721 n = scmutil.revsingle(repo, rev).node()
1722
1722
1723 def d():
1723 def d():
1724 repo.changelog.read(n)
1724 repo.changelog.read(n)
1725 # repo.changelog._cache = None
1725 # repo.changelog._cache = None
1726
1726
1727 timer(d)
1727 timer(d)
1728 fm.end()
1728 fm.end()
1729
1729
1730
1730
1731 @command(b'perf::ignore|perfignore', formatteropts)
1731 @command(b'perf::ignore|perfignore', formatteropts)
1732 def perfignore(ui, repo, **opts):
1732 def perfignore(ui, repo, **opts):
1733 """benchmark operation related to computing ignore"""
1733 """benchmark operation related to computing ignore"""
1734 opts = _byteskwargs(opts)
1734 opts = _byteskwargs(opts)
1735 timer, fm = gettimer(ui, opts)
1735 timer, fm = gettimer(ui, opts)
1736 dirstate = repo.dirstate
1736 dirstate = repo.dirstate
1737
1737
1738 def setupone():
1738 def setupone():
1739 dirstate.invalidate()
1739 dirstate.invalidate()
1740 clearfilecache(dirstate, b'_ignore')
1740 clearfilecache(dirstate, b'_ignore')
1741
1741
1742 def runone():
1742 def runone():
1743 dirstate._ignore
1743 dirstate._ignore
1744
1744
1745 timer(runone, setup=setupone, title=b"load")
1745 timer(runone, setup=setupone, title=b"load")
1746 fm.end()
1746 fm.end()
1747
1747
1748
1748
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # NOTE: after _byteskwargs() every key is bytes; the previous code
        # used the str key opts['rev'] here, which raises KeyError on
        # Python 3 instead of reporting the conflict.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1811
1811
1812
1812
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # Grab the function wrapped by the filecache descriptor directly so the
    # benchmark does not measure the filecache machinery itself.
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # one-slot mutable container shared between the closures below, used to
    # hand a fresh nodemap lookup function to the timed closure
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    if clearcaches:

        def setup():
            setnodeget()

        timer(d, setup=setup)
    else:
        setnodeget()
        d()  # prewarm the data structure
        timer(d, setup=None)
    fm.end()
1883
1883
1884
1884
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # Time a full `hg version -q` invocation in a fresh process, with
    # HGRCPATH neutered so local configuration does not skew the numbers.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name == 'nt':
            # Windows has no inline env-var syntax; set it in our own
            # environment (inherited by the child) and discard output.
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])
        else:
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )

    timer(d)
    fm.end()
1901
1901
1902
1902
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fixed help-text typo: "to us" -> "to use"
            b'stream version to use ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""
    import mercurial.streamclone

    generatev1 = mercurial.streamclone.generatev1
    generatev2 = mercurial.streamclone.generatev2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        result_holder[0] = None

    def runone_v1():
        # the lock is held for the duration the initialisation
        result_holder[0] = generatev1(repo)

    def runone_v2():
        # the lock is held for the duration the initialisation
        result_holder[0] = generatev2(repo, None, None, True)

    if stream_version == b'latest':
        runone = runone_v2
    elif stream_version == b'v2':
        runone = runone_v2
    elif stream_version == b'v1':
        runone = runone_v1
    else:
        msg = b'unknown stream version: "%s"' % stream_version
        raise error.Abort(msg)

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1952
1953
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount bounds how many commits we walk
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def fetchparents():
        # keep the full attribute traversal inside the timed body on purpose:
        # the benchmark measures the whole object-layer path
        for n in nodelist:
            repo.changelog.parents(n)

    timer(fetchparents)
    fm.end()
1928
1979
1929
1980
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # Time reading the file list of one changeset through the context layer.
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
1941
1992
1942
1993
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # Time reading the file list of one changeset straight from the changelog.
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        len(cl.read(rev)[3])

    timer(run)
    fm.end()
1955
2006
1956
2007
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # Time a repo.lookup() of the given revision symbol.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1963
2014
1964
2015
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    nbedits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the same pseudo-random edit sequence
    random.seed(0)
    randint = random.randint
    nblines = 0
    arglist = []
    for rev in _xrange(nbedits):
        a1 = randint(0, nblines)
        a2 = randint(a1, min(nblines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        # track the running line count so later hunks stay in range
        nblines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2002
2053
2003
2054
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # Time resolving the given revset specs into a revision range.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup so only the resolution itself is timed
    revrangefn = scmutil.revrange

    def run():
        return len(revrangefn(repo, specs))

    timer(run)
    fm.end()
2011
2062
2012
2063
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # Time node -> rev resolution on a freshly built changelog revlog,
    # clearing its caches after every lookup.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        # older Mercurial: revlog() wants indexfile= instead of radix=
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def run():
        cl.rev(node)
        clearcaches(cl)

    timer(run)
    fm.end()
2033
2084
2034
2085
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    # Time a full `hg log` run, output swallowed by a ui buffer.
    opts = _byteskwargs(opts)
    rev = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
2052
2103
2053
2104
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
2070
2121
2071
2122
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render through a silenced ui copy so terminal output is not measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    template = defaulttemplate if testedtemplate is None else testedtemplate
    displayer = makelogtemplater(nullui, repo, template)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
2114
2165
2115
2166
def _displaystats(ui, opts, entries, data):
    """Render percentile statistics for each measured data series.

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each
    ``key`` to a list of ``(value, ...)`` tuples collected by the caller.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # BUG FIX: percentile indices must be derived from the number of
        # collected samples, not from the number of data series
        # (len(data)); the old code skewed every percentile and could
        # raise IndexError when there were more series than samples.
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2160
2211
2161
2212
2162 @command(
2213 @command(
2163 b'perf::helper-mergecopies|perfhelper-mergecopies',
2214 b'perf::helper-mergecopies|perfhelper-mergecopies',
2164 formatteropts
2215 formatteropts
2165 + [
2216 + [
2166 (b'r', b'revs', [], b'restrict search to these revisions'),
2217 (b'r', b'revs', [], b'restrict search to these revisions'),
2167 (b'', b'timing', False, b'provides extra data (costly)'),
2218 (b'', b'timing', False, b'provides extra data (costly)'),
2168 (b'', b'stats', False, b'provides statistic about the measured data'),
2219 (b'', b'stats', False, b'provides statistic about the measured data'),
2169 ],
2220 ],
2170 )
2221 )
2171 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2222 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2172 """find statistics about potential parameters for `perfmergecopies`
2223 """find statistics about potential parameters for `perfmergecopies`
2173
2224
2174 This command find (base, p1, p2) triplet relevant for copytracing
2225 This command find (base, p1, p2) triplet relevant for copytracing
2175 benchmarking in the context of a merge. It reports values for some of the
2226 benchmarking in the context of a merge. It reports values for some of the
2176 parameters that impact merge copy tracing time during merge.
2227 parameters that impact merge copy tracing time during merge.
2177
2228
2178 If `--timing` is set, rename detection is run and the associated timing
2229 If `--timing` is set, rename detection is run and the associated timing
2179 will be reported. The extra details come at the cost of slower command
2230 will be reported. The extra details come at the cost of slower command
2180 execution.
2231 execution.
2181
2232
2182 Since rename detection is only run once, other factors might easily
2233 Since rename detection is only run once, other factors might easily
2183 affect the precision of the timing. However it should give a good
2234 affect the precision of the timing. However it should give a good
2184 approximation of which revision triplets are very costly.
2235 approximation of which revision triplets are very costly.
2185 """
2236 """
2186 opts = _byteskwargs(opts)
2237 opts = _byteskwargs(opts)
2187 fm = ui.formatter(b'perf', opts)
2238 fm = ui.formatter(b'perf', opts)
2188 dotiming = opts[b'timing']
2239 dotiming = opts[b'timing']
2189 dostats = opts[b'stats']
2240 dostats = opts[b'stats']
2190
2241
2191 output_template = [
2242 output_template = [
2192 ("base", "%(base)12s"),
2243 ("base", "%(base)12s"),
2193 ("p1", "%(p1.node)12s"),
2244 ("p1", "%(p1.node)12s"),
2194 ("p2", "%(p2.node)12s"),
2245 ("p2", "%(p2.node)12s"),
2195 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2246 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2196 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2247 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2197 ("p1.renames", "%(p1.renamedfiles)12d"),
2248 ("p1.renames", "%(p1.renamedfiles)12d"),
2198 ("p1.time", "%(p1.time)12.3f"),
2249 ("p1.time", "%(p1.time)12.3f"),
2199 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2250 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2200 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2251 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2201 ("p2.renames", "%(p2.renamedfiles)12d"),
2252 ("p2.renames", "%(p2.renamedfiles)12d"),
2202 ("p2.time", "%(p2.time)12.3f"),
2253 ("p2.time", "%(p2.time)12.3f"),
2203 ("renames", "%(nbrenamedfiles)12d"),
2254 ("renames", "%(nbrenamedfiles)12d"),
2204 ("total.time", "%(time)12.3f"),
2255 ("total.time", "%(time)12.3f"),
2205 ]
2256 ]
2206 if not dotiming:
2257 if not dotiming:
2207 output_template = [
2258 output_template = [
2208 i
2259 i
2209 for i in output_template
2260 for i in output_template
2210 if not ('time' in i[0] or 'renames' in i[0])
2261 if not ('time' in i[0] or 'renames' in i[0])
2211 ]
2262 ]
2212 header_names = [h for (h, v) in output_template]
2263 header_names = [h for (h, v) in output_template]
2213 output = ' '.join([v for (h, v) in output_template]) + '\n'
2264 output = ' '.join([v for (h, v) in output_template]) + '\n'
2214 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2265 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2215 fm.plain(header % tuple(header_names))
2266 fm.plain(header % tuple(header_names))
2216
2267
2217 if not revs:
2268 if not revs:
2218 revs = ['all()']
2269 revs = ['all()']
2219 revs = scmutil.revrange(repo, revs)
2270 revs = scmutil.revrange(repo, revs)
2220
2271
2221 if dostats:
2272 if dostats:
2222 alldata = {
2273 alldata = {
2223 'nbrevs': [],
2274 'nbrevs': [],
2224 'nbmissingfiles': [],
2275 'nbmissingfiles': [],
2225 }
2276 }
2226 if dotiming:
2277 if dotiming:
2227 alldata['parentnbrenames'] = []
2278 alldata['parentnbrenames'] = []
2228 alldata['totalnbrenames'] = []
2279 alldata['totalnbrenames'] = []
2229 alldata['parenttime'] = []
2280 alldata['parenttime'] = []
2230 alldata['totaltime'] = []
2281 alldata['totaltime'] = []
2231
2282
2232 roi = repo.revs('merge() and %ld', revs)
2283 roi = repo.revs('merge() and %ld', revs)
2233 for r in roi:
2284 for r in roi:
2234 ctx = repo[r]
2285 ctx = repo[r]
2235 p1 = ctx.p1()
2286 p1 = ctx.p1()
2236 p2 = ctx.p2()
2287 p2 = ctx.p2()
2237 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2288 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2238 for b in bases:
2289 for b in bases:
2239 b = repo[b]
2290 b = repo[b]
2240 p1missing = copies._computeforwardmissing(b, p1)
2291 p1missing = copies._computeforwardmissing(b, p1)
2241 p2missing = copies._computeforwardmissing(b, p2)
2292 p2missing = copies._computeforwardmissing(b, p2)
2242 data = {
2293 data = {
2243 b'base': b.hex(),
2294 b'base': b.hex(),
2244 b'p1.node': p1.hex(),
2295 b'p1.node': p1.hex(),
2245 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2296 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2246 b'p1.nbmissingfiles': len(p1missing),
2297 b'p1.nbmissingfiles': len(p1missing),
2247 b'p2.node': p2.hex(),
2298 b'p2.node': p2.hex(),
2248 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2299 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2249 b'p2.nbmissingfiles': len(p2missing),
2300 b'p2.nbmissingfiles': len(p2missing),
2250 }
2301 }
2251 if dostats:
2302 if dostats:
2252 if p1missing:
2303 if p1missing:
2253 alldata['nbrevs'].append(
2304 alldata['nbrevs'].append(
2254 (data['p1.nbrevs'], b.hex(), p1.hex())
2305 (data['p1.nbrevs'], b.hex(), p1.hex())
2255 )
2306 )
2256 alldata['nbmissingfiles'].append(
2307 alldata['nbmissingfiles'].append(
2257 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2308 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2258 )
2309 )
2259 if p2missing:
2310 if p2missing:
2260 alldata['nbrevs'].append(
2311 alldata['nbrevs'].append(
2261 (data['p2.nbrevs'], b.hex(), p2.hex())
2312 (data['p2.nbrevs'], b.hex(), p2.hex())
2262 )
2313 )
2263 alldata['nbmissingfiles'].append(
2314 alldata['nbmissingfiles'].append(
2264 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2315 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2265 )
2316 )
2266 if dotiming:
2317 if dotiming:
2267 begin = util.timer()
2318 begin = util.timer()
2268 mergedata = copies.mergecopies(repo, p1, p2, b)
2319 mergedata = copies.mergecopies(repo, p1, p2, b)
2269 end = util.timer()
2320 end = util.timer()
2270 # not very stable timing since we did only one run
2321 # not very stable timing since we did only one run
2271 data['time'] = end - begin
2322 data['time'] = end - begin
2272 # mergedata contains five dicts: "copy", "movewithdir",
2323 # mergedata contains five dicts: "copy", "movewithdir",
2273 # "diverge", "renamedelete" and "dirmove".
2324 # "diverge", "renamedelete" and "dirmove".
2274 # The first 4 are about renamed file so lets count that.
2325 # The first 4 are about renamed file so lets count that.
2275 renames = len(mergedata[0])
2326 renames = len(mergedata[0])
2276 renames += len(mergedata[1])
2327 renames += len(mergedata[1])
2277 renames += len(mergedata[2])
2328 renames += len(mergedata[2])
2278 renames += len(mergedata[3])
2329 renames += len(mergedata[3])
2279 data['nbrenamedfiles'] = renames
2330 data['nbrenamedfiles'] = renames
2280 begin = util.timer()
2331 begin = util.timer()
2281 p1renames = copies.pathcopies(b, p1)
2332 p1renames = copies.pathcopies(b, p1)
2282 end = util.timer()
2333 end = util.timer()
2283 data['p1.time'] = end - begin
2334 data['p1.time'] = end - begin
2284 begin = util.timer()
2335 begin = util.timer()
2285 p2renames = copies.pathcopies(b, p2)
2336 p2renames = copies.pathcopies(b, p2)
2286 end = util.timer()
2337 end = util.timer()
2287 data['p2.time'] = end - begin
2338 data['p2.time'] = end - begin
2288 data['p1.renamedfiles'] = len(p1renames)
2339 data['p1.renamedfiles'] = len(p1renames)
2289 data['p2.renamedfiles'] = len(p2renames)
2340 data['p2.renamedfiles'] = len(p2renames)
2290
2341
2291 if dostats:
2342 if dostats:
2292 if p1missing:
2343 if p1missing:
2293 alldata['parentnbrenames'].append(
2344 alldata['parentnbrenames'].append(
2294 (data['p1.renamedfiles'], b.hex(), p1.hex())
2345 (data['p1.renamedfiles'], b.hex(), p1.hex())
2295 )
2346 )
2296 alldata['parenttime'].append(
2347 alldata['parenttime'].append(
2297 (data['p1.time'], b.hex(), p1.hex())
2348 (data['p1.time'], b.hex(), p1.hex())
2298 )
2349 )
2299 if p2missing:
2350 if p2missing:
2300 alldata['parentnbrenames'].append(
2351 alldata['parentnbrenames'].append(
2301 (data['p2.renamedfiles'], b.hex(), p2.hex())
2352 (data['p2.renamedfiles'], b.hex(), p2.hex())
2302 )
2353 )
2303 alldata['parenttime'].append(
2354 alldata['parenttime'].append(
2304 (data['p2.time'], b.hex(), p2.hex())
2355 (data['p2.time'], b.hex(), p2.hex())
2305 )
2356 )
2306 if p1missing or p2missing:
2357 if p1missing or p2missing:
2307 alldata['totalnbrenames'].append(
2358 alldata['totalnbrenames'].append(
2308 (
2359 (
2309 data['nbrenamedfiles'],
2360 data['nbrenamedfiles'],
2310 b.hex(),
2361 b.hex(),
2311 p1.hex(),
2362 p1.hex(),
2312 p2.hex(),
2363 p2.hex(),
2313 )
2364 )
2314 )
2365 )
2315 alldata['totaltime'].append(
2366 alldata['totaltime'].append(
2316 (data['time'], b.hex(), p1.hex(), p2.hex())
2367 (data['time'], b.hex(), p1.hex(), p2.hex())
2317 )
2368 )
2318 fm.startitem()
2369 fm.startitem()
2319 fm.data(**data)
2370 fm.data(**data)
2320 # make node pretty for the human output
2371 # make node pretty for the human output
2321 out = data.copy()
2372 out = data.copy()
2322 out['base'] = fm.hexfunc(b.node())
2373 out['base'] = fm.hexfunc(b.node())
2323 out['p1.node'] = fm.hexfunc(p1.node())
2374 out['p1.node'] = fm.hexfunc(p1.node())
2324 out['p2.node'] = fm.hexfunc(p2.node())
2375 out['p2.node'] = fm.hexfunc(p2.node())
2325 fm.plain(output % out)
2376 fm.plain(output % out)
2326
2377
2327 fm.end()
2378 fm.end()
2328 if dostats:
2379 if dostats:
2329 # use a second formatter because the data are quite different, not sure
2380 # use a second formatter because the data are quite different, not sure
2330 # how it flies with the templater.
2381 # how it flies with the templater.
2331 entries = [
2382 entries = [
2332 ('nbrevs', 'number of revision covered'),
2383 ('nbrevs', 'number of revision covered'),
2333 ('nbmissingfiles', 'number of missing files at head'),
2384 ('nbmissingfiles', 'number of missing files at head'),
2334 ]
2385 ]
2335 if dotiming:
2386 if dotiming:
2336 entries.append(
2387 entries.append(
2337 ('parentnbrenames', 'rename from one parent to base')
2388 ('parentnbrenames', 'rename from one parent to base')
2338 )
2389 )
2339 entries.append(('totalnbrenames', 'total number of renames'))
2390 entries.append(('totalnbrenames', 'total number of renames'))
2340 entries.append(('parenttime', 'time for one parent'))
2391 entries.append(('parenttime', 'time for one parent'))
2341 entries.append(('totaltime', 'time for both parents'))
2392 entries.append(('totaltime', 'time for both parents'))
2342 _displaystats(ui, opts, entries, alldata)
2393 _displaystats(ui, opts, entries, alldata)
2343
2394
2344
2395
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # With --timing two extra columns are emitted: the number of renames
    # found and the wall-clock duration of the pathcopies() call.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # per-metric lists of (value, source-hex, destination-hex) tuples,
        # consumed by _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # Only merge revisions are interesting: copy tracing happens between a
    # common-ancestor base and each of the merge's two parents.
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # one (base, parent) pair per parent per common-ancestor head
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace for this pair, skip it entirely
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make the nodes pretty for the human-readable output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2483
2534
2484
2535
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """Benchmark construction of a case-collision auditor."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(run)
    fm.end()
2491
2542
2492
2543
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """Benchmark loading the fncache from disk."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2504
2555
2505
2556
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """Benchmark rewriting the fncache file inside a transaction.

    The fncache is force-marked dirty before every run so that write()
    actually rewrites the file each time instead of short-circuiting.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # Fix: the lock and the transaction were previously never released or
    # aborted when the benchmark raised, leaking the repository lock and
    # leaving a dangling transaction. Wrap both in try/finally so the
    # success path is unchanged but failures clean up after themselves.
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                # force a real rewrite on every timed run
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        except Exception:
            tr.abort()
            raise
    finally:
        lock.release()
    fm.end()
2524
2575
2525
2576
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark encoding every path currently held in the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def run():
        encode = store.encode
        for path in store.fncache.entries:
            encode(path)

    timer(run)
    fm.end()
2539
2590
2540
2591
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of perfbdiff.
    #
    # Protocol (see perfbdiff for the producing side):
    #   - text pairs to diff arrive on queue `q`; a None item is the
    #     end-of-batch sentinel (one per worker and per batch),
    #   - after draining a batch the worker parks on the `ready` condition
    #     until the main thread notifies (either a new batch was queued or
    #     shutdown was requested),
    #   - `done` being set tells the worker to exit its outer loop.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same three diff flavors as the single-threaded perfbdiff path
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2556
2607
2557
2608
def _manifestrevision(repo, mnode):
    """Return the raw revision text for manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        # compat: older Mercurial exposed the manifest revlog directly
        storage = manifestlog._revlog

    return storage.revision(mnode)
2567
2618
2568
2619
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # -c/-m/--alldata take no FILE argument: the positional slot holds REV
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect all (old, new) text pairs up front so only the diffing itself
    # is timed, not revlog reads.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded: spin up workers, then feed one batch of pairs (plus one
        # None sentinel per worker) per timed run; see _bdiffworker for the
        # queue/condition protocol.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        # wait for workers to consume the priming sentinels before timing
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the exit flag, queue final sentinels,
        # and wake everyone parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2683
2734
2684
2735
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [bundle-generator, transaction]; kept in a list so
            # setup()/apply() closures can rebind its slots between runs
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # roll back the previous run (if any) and re-read the
                        # bundle from the start of the file
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # Fix: this line previously read `repo.ui.quiet == orig_quiet`
                # (a no-op comparison instead of an assignment), which left
                # the ui permanently silenced after the benchmark.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2764
2815
2765
2816
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # -c/-m/--alldata take no FILE argument: the positional slot holds REV
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Collect all (left, right) text pairs up front so only the diffing
    # itself is timed, not revlog reads.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2844
2895
2845
2896
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    flag_names = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark each whitespace-option combination separately
    for combo in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {flag_names[flag]: b'1' for flag in combo}

        def run(kwargs=diff_kwargs):
            # swallow the diff output; only the timing matters
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        encoded = combo.encode('ascii')
        if encoded:
            title = b'diffopts: -' + encoded
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
2869
2920
2870
2921
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # the first four bytes of a revlog index hold flags + version number
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # probe nodes at various depths of the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def _node_to_rev(index):
        """Return the node->rev lookup callable for `index`, or None.

        Modern indexes expose a ``rev`` method; older indexes only offer a
        ``nodemap`` attribute (C implementation only).
        """
        rev = getattr(index, 'rev', None)
        if rev is None:
            # fixed: reuse the index we already parsed instead of parsing
            # the raw data a second time just to read its nodemap — the
            # extra parse ran inside the timed benchmark body and skewed
            # the measurement on indexes without a `rev` method.
            nodemap = getattr(index, 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return None
            rev = nodemap.__getitem__
        return rev

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = _node_to_rev(index)
        if rev is None:
            return

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = _node_to_rev(index)
        if rev is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3016
3067
3017
3068
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the tip
    if startrev < 0:
        startrev = rllen + startrev

    def run():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # walk from the tip down to the start revision instead
            first, last = last - 1, first - 1
            step = -1 * step

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = rl.node(pos)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
3066
3117
3067
3118
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the tip
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed typo in the error message ("invalide" -> "invalid")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # every pass replays the same revisions, so pass timings can be zipped
    # together per revision
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the median row was previously computed with a 70% index
        # (`resultcount * 70 // 100`), so the "50%" line reported the 70th
        # percentile instead of the median
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3209
3260
3210
3261
3211 class _faketr:
3262 class _faketr:
3212 def add(s, x, y, z=None):
3263 def add(s, x, y, z=None):
3213 return None
3264 return None
3214
3265
3215
3266
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of ``orig`` into a scratch
    revlog, timing each ``addrawrevision`` call.

    Returns a list of ``(rev, timing)`` pairs, one per replayed revision.
    """
    timings = []
    faketr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            # bind the methods directly instead of wrapping them
            updateprogress = progress.update

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, faketr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the actual write is timed; seed preparation and cache
            # clearing happen outside the timer
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3265
3316
3266
3317
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for ``addrawrevision`` replaying
    revision ``rev`` of ``orig``.

    ``source`` selects how the content is fed in: as a full text, as a
    delta against one of the parents, or as the delta already stored in
    ``orig``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta (ties go to p1)
        best_parent = p1
        best_diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p2diff) < len(best_diff):
                best_parent = p2
                best_diff = p2diff
        cachedelta = (orig.rev(best_parent), best_diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3307
3358
3308
3359
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated before ``truncaterev``.

    The copy lives in a throw-away directory that is removed when the
    context exits. Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    scratchdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % scratchdir)
        destindexpath = os.path.join(scratchdir, 'revlog.i')
        destdatapath = os.path.join(scratchdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        scratchvfs = vfsmod.vfs(scratchdir)
        scratchvfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(scratchvfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8 takes explicit file names instead of a radix
            dest = revlog(
                scratchvfs, indexfile=indexname, datafile=dataname,
                **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, scratchvfs
    finally:
        shutil.rmtree(scratchdir, True)
3369
3420
3370
3421
3371 @command(
3422 @command(
3372 b'perf::revlogchunks|perfrevlogchunks',
3423 b'perf::revlogchunks|perfrevlogchunks',
3373 revlogopts
3424 revlogopts
3374 + formatteropts
3425 + formatteropts
3375 + [
3426 + [
3376 (b'e', b'engines', b'', b'compression engines to use'),
3427 (b'e', b'engines', b'', b'compression engines to use'),
3377 (b's', b'startrev', 0, b'revision to start at'),
3428 (b's', b'startrev', 0, b'revision to start at'),
3378 ],
3429 ],
3379 b'-c|-m|FILE',
3430 b'-c|-m|FILE',
3380 )
3431 )
3381 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3432 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3382 """Benchmark operations on revlog chunks.
3433 """Benchmark operations on revlog chunks.
3383
3434
3384 Logically, each revlog is a collection of fulltext revisions. However,
3435 Logically, each revlog is a collection of fulltext revisions. However,
3385 stored within each revlog are "chunks" of possibly compressed data. This
3436 stored within each revlog are "chunks" of possibly compressed data. This
3386 data needs to be read and decompressed or compressed and written.
3437 data needs to be read and decompressed or compressed and written.
3387
3438
3388 This command measures the time it takes to read+decompress and recompress
3439 This command measures the time it takes to read+decompress and recompress
3389 chunks in a revlog. It effectively isolates I/O and compression performance.
3440 chunks in a revlog. It effectively isolates I/O and compression performance.
3390 For measurements of higher-level operations like resolving revisions,
3441 For measurements of higher-level operations like resolving revisions,
3391 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3442 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3392 """
3443 """
3393 opts = _byteskwargs(opts)
3444 opts = _byteskwargs(opts)
3394
3445
3395 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3446 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3396
3447
3397 # _chunkraw was renamed to _getsegmentforrevs.
3448 # _chunkraw was renamed to _getsegmentforrevs.
3398 try:
3449 try:
3399 segmentforrevs = rl._getsegmentforrevs
3450 segmentforrevs = rl._getsegmentforrevs
3400 except AttributeError:
3451 except AttributeError:
3401 segmentforrevs = rl._chunkraw
3452 segmentforrevs = rl._chunkraw
3402
3453
3403 # Verify engines argument.
3454 # Verify engines argument.
3404 if engines:
3455 if engines:
3405 engines = {e.strip() for e in engines.split(b',')}
3456 engines = {e.strip() for e in engines.split(b',')}
3406 for engine in engines:
3457 for engine in engines:
3407 try:
3458 try:
3408 util.compressionengines[engine]
3459 util.compressionengines[engine]
3409 except KeyError:
3460 except KeyError:
3410 raise error.Abort(b'unknown compression engine: %s' % engine)
3461 raise error.Abort(b'unknown compression engine: %s' % engine)
3411 else:
3462 else:
3412 engines = []
3463 engines = []
3413 for e in util.compengines:
3464 for e in util.compengines:
3414 engine = util.compengines[e]
3465 engine = util.compengines[e]
3415 try:
3466 try:
3416 if engine.available():
3467 if engine.available():
3417 engine.revlogcompressor().compress(b'dummy')
3468 engine.revlogcompressor().compress(b'dummy')
3418 engines.append(e)
3469 engines.append(e)
3419 except NotImplementedError:
3470 except NotImplementedError:
3420 pass
3471 pass
3421
3472
3422 revs = list(rl.revs(startrev, len(rl) - 1))
3473 revs = list(rl.revs(startrev, len(rl) - 1))
3423
3474
3424 def rlfh(rl):
3475 def rlfh(rl):
3425 if rl._inline:
3476 if rl._inline:
3426 indexfile = getattr(rl, '_indexfile', None)
3477 indexfile = getattr(rl, '_indexfile', None)
3427 if indexfile is None:
3478 if indexfile is None:
3428 # compatibility with <= hg-5.8
3479 # compatibility with <= hg-5.8
3429 indexfile = getattr(rl, 'indexfile')
3480 indexfile = getattr(rl, 'indexfile')
3430 return getsvfs(repo)(indexfile)
3481 return getsvfs(repo)(indexfile)
3431 else:
3482 else:
3432 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3483 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3433 return getsvfs(repo)(datafile)
3484 return getsvfs(repo)(datafile)
3434
3485
3435 def doread():
3486 def doread():
3436 rl.clearcaches()
3487 rl.clearcaches()
3437 for rev in revs:
3488 for rev in revs:
3438 segmentforrevs(rev, rev)
3489 segmentforrevs(rev, rev)
3439
3490
3440 def doreadcachedfh():
3491 def doreadcachedfh():
3441 rl.clearcaches()
3492 rl.clearcaches()
3442 fh = rlfh(rl)
3493 fh = rlfh(rl)
3443 for rev in revs:
3494 for rev in revs:
3444 segmentforrevs(rev, rev, df=fh)
3495 segmentforrevs(rev, rev, df=fh)
3445
3496
3446 def doreadbatch():
3497 def doreadbatch():
3447 rl.clearcaches()
3498 rl.clearcaches()
3448 segmentforrevs(revs[0], revs[-1])
3499 segmentforrevs(revs[0], revs[-1])
3449
3500
3450 def doreadbatchcachedfh():
3501 def doreadbatchcachedfh():
3451 rl.clearcaches()
3502 rl.clearcaches()
3452 fh = rlfh(rl)
3503 fh = rlfh(rl)
3453 segmentforrevs(revs[0], revs[-1], df=fh)
3504 segmentforrevs(revs[0], revs[-1], df=fh)
3454
3505
3455 def dochunk():
3506 def dochunk():
3456 rl.clearcaches()
3507 rl.clearcaches()
3457 fh = rlfh(rl)
3508 fh = rlfh(rl)
3458 for rev in revs:
3509 for rev in revs:
3459 rl._chunk(rev, df=fh)
3510 rl._chunk(rev, df=fh)
3460
3511
3461 chunks = [None]
3512 chunks = [None]
3462
3513
3463 def dochunkbatch():
3514 def dochunkbatch():
3464 rl.clearcaches()
3515 rl.clearcaches()
3465 fh = rlfh(rl)
3516 fh = rlfh(rl)
3466 # Save chunks as a side-effect.
3517 # Save chunks as a side-effect.
3467 chunks[0] = rl._chunks(revs, df=fh)
3518 chunks[0] = rl._chunks(revs, df=fh)
3468
3519
3469 def docompress(compressor):
3520 def docompress(compressor):
3470 rl.clearcaches()
3521 rl.clearcaches()
3471
3522
3472 try:
3523 try:
3473 # Swap in the requested compression engine.
3524 # Swap in the requested compression engine.
3474 oldcompressor = rl._compressor
3525 oldcompressor = rl._compressor
3475 rl._compressor = compressor
3526 rl._compressor = compressor
3476 for chunk in chunks[0]:
3527 for chunk in chunks[0]:
3477 rl.compress(chunk)
3528 rl.compress(chunk)
3478 finally:
3529 finally:
3479 rl._compressor = oldcompressor
3530 rl._compressor = oldcompressor
3480
3531
3481 benches = [
3532 benches = [
3482 (lambda: doread(), b'read'),
3533 (lambda: doread(), b'read'),
3483 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3534 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3484 (lambda: doreadbatch(), b'read batch'),
3535 (lambda: doreadbatch(), b'read batch'),
3485 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3536 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3486 (lambda: dochunk(), b'chunk'),
3537 (lambda: dochunk(), b'chunk'),
3487 (lambda: dochunkbatch(), b'chunk batch'),
3538 (lambda: dochunkbatch(), b'chunk batch'),
3488 ]
3539 ]
3489
3540
3490 for engine in sorted(engines):
3541 for engine in sorted(engines):
3491 compressor = util.compengines[engine].revlogcompressor()
3542 compressor = util.compengines[engine].revlogcompressor()
3492 benches.append(
3543 benches.append(
3493 (
3544 (
3494 functools.partial(docompress, compressor),
3545 functools.partial(docompress, compressor),
3495 b'compress w/ %s' % engine,
3546 b'compress w/ %s' % engine,
3496 )
3547 )
3497 )
3548 )
3498
3549
3499 for fn, title in benches:
3550 for fn, title in benches:
3500 timer, fm = gettimer(ui, opts)
3551 timer, fm = gettimer(ui, opts)
3501 timer(fn, title=title)
3552 timer(fn, title=title)
3502 fm.end()
3553 fm.end()
3503
3554
3504
3555
3505 @command(
3556 @command(
3506 b'perf::revlogrevision|perfrevlogrevision',
3557 b'perf::revlogrevision|perfrevlogrevision',
3507 revlogopts
3558 revlogopts
3508 + formatteropts
3559 + formatteropts
3509 + [(b'', b'cache', False, b'use caches instead of clearing')],
3560 + [(b'', b'cache', False, b'use caches instead of clearing')],
3510 b'-c|-m|FILE REV',
3561 b'-c|-m|FILE REV',
3511 )
3562 )
3512 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3563 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3513 """Benchmark obtaining a revlog revision.
3564 """Benchmark obtaining a revlog revision.
3514
3565
3515 Obtaining a revlog revision consists of roughly the following steps:
3566 Obtaining a revlog revision consists of roughly the following steps:
3516
3567
3517 1. Compute the delta chain
3568 1. Compute the delta chain
3518 2. Slice the delta chain if applicable
3569 2. Slice the delta chain if applicable
3519 3. Obtain the raw chunks for that delta chain
3570 3. Obtain the raw chunks for that delta chain
3520 4. Decompress each raw chunk
3571 4. Decompress each raw chunk
3521 5. Apply binary patches to obtain fulltext
3572 5. Apply binary patches to obtain fulltext
3522 6. Verify hash of fulltext
3573 6. Verify hash of fulltext
3523
3574
3524 This command measures the time spent in each of these phases.
3575 This command measures the time spent in each of these phases.
3525 """
3576 """
3526 opts = _byteskwargs(opts)
3577 opts = _byteskwargs(opts)
3527
3578
3528 if opts.get(b'changelog') or opts.get(b'manifest'):
3579 if opts.get(b'changelog') or opts.get(b'manifest'):
3529 file_, rev = None, file_
3580 file_, rev = None, file_
3530 elif rev is None:
3581 elif rev is None:
3531 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3582 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3532
3583
3533 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3584 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3534
3585
3535 # _chunkraw was renamed to _getsegmentforrevs.
3586 # _chunkraw was renamed to _getsegmentforrevs.
3536 try:
3587 try:
3537 segmentforrevs = r._getsegmentforrevs
3588 segmentforrevs = r._getsegmentforrevs
3538 except AttributeError:
3589 except AttributeError:
3539 segmentforrevs = r._chunkraw
3590 segmentforrevs = r._chunkraw
3540
3591
3541 node = r.lookup(rev)
3592 node = r.lookup(rev)
3542 rev = r.rev(node)
3593 rev = r.rev(node)
3543
3594
3544 def getrawchunks(data, chain):
3595 def getrawchunks(data, chain):
3545 start = r.start
3596 start = r.start
3546 length = r.length
3597 length = r.length
3547 inline = r._inline
3598 inline = r._inline
3548 try:
3599 try:
3549 iosize = r.index.entry_size
3600 iosize = r.index.entry_size
3550 except AttributeError:
3601 except AttributeError:
3551 iosize = r._io.size
3602 iosize = r._io.size
3552 buffer = util.buffer
3603 buffer = util.buffer
3553
3604
3554 chunks = []
3605 chunks = []
3555 ladd = chunks.append
3606 ladd = chunks.append
3556 for idx, item in enumerate(chain):
3607 for idx, item in enumerate(chain):
3557 offset = start(item[0])
3608 offset = start(item[0])
3558 bits = data[idx]
3609 bits = data[idx]
3559 for rev in item:
3610 for rev in item:
3560 chunkstart = start(rev)
3611 chunkstart = start(rev)
3561 if inline:
3612 if inline:
3562 chunkstart += (rev + 1) * iosize
3613 chunkstart += (rev + 1) * iosize
3563 chunklength = length(rev)
3614 chunklength = length(rev)
3564 ladd(buffer(bits, chunkstart - offset, chunklength))
3615 ladd(buffer(bits, chunkstart - offset, chunklength))
3565
3616
3566 return chunks
3617 return chunks
3567
3618
3568 def dodeltachain(rev):
3619 def dodeltachain(rev):
3569 if not cache:
3620 if not cache:
3570 r.clearcaches()
3621 r.clearcaches()
3571 r._deltachain(rev)
3622 r._deltachain(rev)
3572
3623
3573 def doread(chain):
3624 def doread(chain):
3574 if not cache:
3625 if not cache:
3575 r.clearcaches()
3626 r.clearcaches()
3576 for item in slicedchain:
3627 for item in slicedchain:
3577 segmentforrevs(item[0], item[-1])
3628 segmentforrevs(item[0], item[-1])
3578
3629
3579 def doslice(r, chain, size):
3630 def doslice(r, chain, size):
3580 for s in slicechunk(r, chain, targetsize=size):
3631 for s in slicechunk(r, chain, targetsize=size):
3581 pass
3632 pass
3582
3633
3583 def dorawchunks(data, chain):
3634 def dorawchunks(data, chain):
3584 if not cache:
3635 if not cache:
3585 r.clearcaches()
3636 r.clearcaches()
3586 getrawchunks(data, chain)
3637 getrawchunks(data, chain)
3587
3638
3588 def dodecompress(chunks):
3639 def dodecompress(chunks):
3589 decomp = r.decompress
3640 decomp = r.decompress
3590 for chunk in chunks:
3641 for chunk in chunks:
3591 decomp(chunk)
3642 decomp(chunk)
3592
3643
3593 def dopatch(text, bins):
3644 def dopatch(text, bins):
3594 if not cache:
3645 if not cache:
3595 r.clearcaches()
3646 r.clearcaches()
3596 mdiff.patches(text, bins)
3647 mdiff.patches(text, bins)
3597
3648
3598 def dohash(text):
3649 def dohash(text):
3599 if not cache:
3650 if not cache:
3600 r.clearcaches()
3651 r.clearcaches()
3601 r.checkhash(text, node, rev=rev)
3652 r.checkhash(text, node, rev=rev)
3602
3653
3603 def dorevision():
3654 def dorevision():
3604 if not cache:
3655 if not cache:
3605 r.clearcaches()
3656 r.clearcaches()
3606 r.revision(node)
3657 r.revision(node)
3607
3658
3608 try:
3659 try:
3609 from mercurial.revlogutils.deltas import slicechunk
3660 from mercurial.revlogutils.deltas import slicechunk
3610 except ImportError:
3661 except ImportError:
3611 slicechunk = getattr(revlog, '_slicechunk', None)
3662 slicechunk = getattr(revlog, '_slicechunk', None)
3612
3663
3613 size = r.length(rev)
3664 size = r.length(rev)
3614 chain = r._deltachain(rev)[0]
3665 chain = r._deltachain(rev)[0]
3615 if not getattr(r, '_withsparseread', False):
3666 if not getattr(r, '_withsparseread', False):
3616 slicedchain = (chain,)
3667 slicedchain = (chain,)
3617 else:
3668 else:
3618 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3669 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3619 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3670 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3620 rawchunks = getrawchunks(data, slicedchain)
3671 rawchunks = getrawchunks(data, slicedchain)
3621 bins = r._chunks(chain)
3672 bins = r._chunks(chain)
3622 text = bytes(bins[0])
3673 text = bytes(bins[0])
3623 bins = bins[1:]
3674 bins = bins[1:]
3624 text = mdiff.patches(text, bins)
3675 text = mdiff.patches(text, bins)
3625
3676
3626 benches = [
3677 benches = [
3627 (lambda: dorevision(), b'full'),
3678 (lambda: dorevision(), b'full'),
3628 (lambda: dodeltachain(rev), b'deltachain'),
3679 (lambda: dodeltachain(rev), b'deltachain'),
3629 (lambda: doread(chain), b'read'),
3680 (lambda: doread(chain), b'read'),
3630 ]
3681 ]
3631
3682
3632 if getattr(r, '_withsparseread', False):
3683 if getattr(r, '_withsparseread', False):
3633 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3684 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3634 benches.append(slicing)
3685 benches.append(slicing)
3635
3686
3636 benches.extend(
3687 benches.extend(
3637 [
3688 [
3638 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3689 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3639 (lambda: dodecompress(rawchunks), b'decompress'),
3690 (lambda: dodecompress(rawchunks), b'decompress'),
3640 (lambda: dopatch(text, bins), b'patch'),
3691 (lambda: dopatch(text, bins), b'patch'),
3641 (lambda: dohash(text), b'hash'),
3692 (lambda: dohash(text), b'hash'),
3642 ]
3693 ]
3643 )
3694 )
3644
3695
3645 timer, fm = gettimer(ui, opts)
3696 timer, fm = gettimer(ui, opts)
3646 for fn, title in benches:
3697 for fn, title in benches:
3647 timer(fn, title=title)
3698 timer(fn, title=title)
3648 fm.end()
3699 fm.end()
3649
3700
3650
3701
3651 @command(
3702 @command(
3652 b'perf::revset|perfrevset',
3703 b'perf::revset|perfrevset',
3653 [
3704 [
3654 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3705 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3655 (b'', b'contexts', False, b'obtain changectx for each revision'),
3706 (b'', b'contexts', False, b'obtain changectx for each revision'),
3656 ]
3707 ]
3657 + formatteropts,
3708 + formatteropts,
3658 b"REVSET",
3709 b"REVSET",
3659 )
3710 )
3660 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3711 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3661 """benchmark the execution time of a revset
3712 """benchmark the execution time of a revset
3662
3713
3663 Use the --clean option if need to evaluate the impact of build volatile
3714 Use the --clean option if need to evaluate the impact of build volatile
3664 revisions set cache on the revset execution. Volatile cache hold filtered
3715 revisions set cache on the revset execution. Volatile cache hold filtered
3665 and obsolete related cache."""
3716 and obsolete related cache."""
3666 opts = _byteskwargs(opts)
3717 opts = _byteskwargs(opts)
3667
3718
3668 timer, fm = gettimer(ui, opts)
3719 timer, fm = gettimer(ui, opts)
3669
3720
3670 def d():
3721 def d():
3671 if clear:
3722 if clear:
3672 repo.invalidatevolatilesets()
3723 repo.invalidatevolatilesets()
3673 if contexts:
3724 if contexts:
3674 for ctx in repo.set(expr):
3725 for ctx in repo.set(expr):
3675 pass
3726 pass
3676 else:
3727 else:
3677 for r in repo.revs(expr):
3728 for r in repo.revs(expr):
3678 pass
3729 pass
3679
3730
3680 timer(d)
3731 timer(d)
3681 fm.end()
3732 fm.end()
3682
3733
3683
3734
3684 @command(
3735 @command(
3685 b'perf::volatilesets|perfvolatilesets',
3736 b'perf::volatilesets|perfvolatilesets',
3686 [
3737 [
3687 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3738 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3688 ]
3739 ]
3689 + formatteropts,
3740 + formatteropts,
3690 )
3741 )
3691 def perfvolatilesets(ui, repo, *names, **opts):
3742 def perfvolatilesets(ui, repo, *names, **opts):
3692 """benchmark the computation of various volatile set
3743 """benchmark the computation of various volatile set
3693
3744
3694 Volatile set computes element related to filtering and obsolescence."""
3745 Volatile set computes element related to filtering and obsolescence."""
3695 opts = _byteskwargs(opts)
3746 opts = _byteskwargs(opts)
3696 timer, fm = gettimer(ui, opts)
3747 timer, fm = gettimer(ui, opts)
3697 repo = repo.unfiltered()
3748 repo = repo.unfiltered()
3698
3749
3699 def getobs(name):
3750 def getobs(name):
3700 def d():
3751 def d():
3701 repo.invalidatevolatilesets()
3752 repo.invalidatevolatilesets()
3702 if opts[b'clear_obsstore']:
3753 if opts[b'clear_obsstore']:
3703 clearfilecache(repo, b'obsstore')
3754 clearfilecache(repo, b'obsstore')
3704 obsolete.getrevs(repo, name)
3755 obsolete.getrevs(repo, name)
3705
3756
3706 return d
3757 return d
3707
3758
3708 allobs = sorted(obsolete.cachefuncs)
3759 allobs = sorted(obsolete.cachefuncs)
3709 if names:
3760 if names:
3710 allobs = [n for n in allobs if n in names]
3761 allobs = [n for n in allobs if n in names]
3711
3762
3712 for name in allobs:
3763 for name in allobs:
3713 timer(getobs(name), title=name)
3764 timer(getobs(name), title=name)
3714
3765
3715 def getfiltered(name):
3766 def getfiltered(name):
3716 def d():
3767 def d():
3717 repo.invalidatevolatilesets()
3768 repo.invalidatevolatilesets()
3718 if opts[b'clear_obsstore']:
3769 if opts[b'clear_obsstore']:
3719 clearfilecache(repo, b'obsstore')
3770 clearfilecache(repo, b'obsstore')
3720 repoview.filterrevs(repo, name)
3771 repoview.filterrevs(repo, name)
3721
3772
3722 return d
3773 return d
3723
3774
3724 allfilter = sorted(repoview.filtertable)
3775 allfilter = sorted(repoview.filtertable)
3725 if names:
3776 if names:
3726 allfilter = [n for n in allfilter if n in names]
3777 allfilter = [n for n in allfilter if n in names]
3727
3778
3728 for name in allfilter:
3779 for name in allfilter:
3729 timer(getfiltered(name), title=name)
3780 timer(getfiltered(name), title=name)
3730 fm.end()
3781 fm.end()
3731
3782
3732
3783
3733 @command(
3784 @command(
3734 b'perf::branchmap|perfbranchmap',
3785 b'perf::branchmap|perfbranchmap',
3735 [
3786 [
3736 (b'f', b'full', False, b'Includes build time of subset'),
3787 (b'f', b'full', False, b'Includes build time of subset'),
3737 (
3788 (
3738 b'',
3789 b'',
3739 b'clear-revbranch',
3790 b'clear-revbranch',
3740 False,
3791 False,
3741 b'purge the revbranch cache between computation',
3792 b'purge the revbranch cache between computation',
3742 ),
3793 ),
3743 ]
3794 ]
3744 + formatteropts,
3795 + formatteropts,
3745 )
3796 )
3746 def perfbranchmap(ui, repo, *filternames, **opts):
3797 def perfbranchmap(ui, repo, *filternames, **opts):
3747 """benchmark the update of a branchmap
3798 """benchmark the update of a branchmap
3748
3799
3749 This benchmarks the full repo.branchmap() call with read and write disabled
3800 This benchmarks the full repo.branchmap() call with read and write disabled
3750 """
3801 """
3751 opts = _byteskwargs(opts)
3802 opts = _byteskwargs(opts)
3752 full = opts.get(b"full", False)
3803 full = opts.get(b"full", False)
3753 clear_revbranch = opts.get(b"clear_revbranch", False)
3804 clear_revbranch = opts.get(b"clear_revbranch", False)
3754 timer, fm = gettimer(ui, opts)
3805 timer, fm = gettimer(ui, opts)
3755
3806
3756 def getbranchmap(filtername):
3807 def getbranchmap(filtername):
3757 """generate a benchmark function for the filtername"""
3808 """generate a benchmark function for the filtername"""
3758 if filtername is None:
3809 if filtername is None:
3759 view = repo
3810 view = repo
3760 else:
3811 else:
3761 view = repo.filtered(filtername)
3812 view = repo.filtered(filtername)
3762 if util.safehasattr(view._branchcaches, '_per_filter'):
3813 if util.safehasattr(view._branchcaches, '_per_filter'):
3763 filtered = view._branchcaches._per_filter
3814 filtered = view._branchcaches._per_filter
3764 else:
3815 else:
3765 # older versions
3816 # older versions
3766 filtered = view._branchcaches
3817 filtered = view._branchcaches
3767
3818
3768 def d():
3819 def d():
3769 if clear_revbranch:
3820 if clear_revbranch:
3770 repo.revbranchcache()._clear()
3821 repo.revbranchcache()._clear()
3771 if full:
3822 if full:
3772 view._branchcaches.clear()
3823 view._branchcaches.clear()
3773 else:
3824 else:
3774 filtered.pop(filtername, None)
3825 filtered.pop(filtername, None)
3775 view.branchmap()
3826 view.branchmap()
3776
3827
3777 return d
3828 return d
3778
3829
3779 # add filter in smaller subset to bigger subset
3830 # add filter in smaller subset to bigger subset
3780 possiblefilters = set(repoview.filtertable)
3831 possiblefilters = set(repoview.filtertable)
3781 if filternames:
3832 if filternames:
3782 possiblefilters &= set(filternames)
3833 possiblefilters &= set(filternames)
3783 subsettable = getbranchmapsubsettable()
3834 subsettable = getbranchmapsubsettable()
3784 allfilters = []
3835 allfilters = []
3785 while possiblefilters:
3836 while possiblefilters:
3786 for name in possiblefilters:
3837 for name in possiblefilters:
3787 subset = subsettable.get(name)
3838 subset = subsettable.get(name)
3788 if subset not in possiblefilters:
3839 if subset not in possiblefilters:
3789 break
3840 break
3790 else:
3841 else:
3791 assert False, b'subset cycle %s!' % possiblefilters
3842 assert False, b'subset cycle %s!' % possiblefilters
3792 allfilters.append(name)
3843 allfilters.append(name)
3793 possiblefilters.remove(name)
3844 possiblefilters.remove(name)
3794
3845
3795 # warm the cache
3846 # warm the cache
3796 if not full:
3847 if not full:
3797 for name in allfilters:
3848 for name in allfilters:
3798 repo.filtered(name).branchmap()
3849 repo.filtered(name).branchmap()
3799 if not filternames or b'unfiltered' in filternames:
3850 if not filternames or b'unfiltered' in filternames:
3800 # add unfiltered
3851 # add unfiltered
3801 allfilters.append(None)
3852 allfilters.append(None)
3802
3853
3803 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3854 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3804 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3855 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3805 branchcacheread.set(classmethod(lambda *args: None))
3856 branchcacheread.set(classmethod(lambda *args: None))
3806 else:
3857 else:
3807 # older versions
3858 # older versions
3808 branchcacheread = safeattrsetter(branchmap, b'read')
3859 branchcacheread = safeattrsetter(branchmap, b'read')
3809 branchcacheread.set(lambda *args: None)
3860 branchcacheread.set(lambda *args: None)
3810 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3861 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3811 branchcachewrite.set(lambda *args: None)
3862 branchcachewrite.set(lambda *args: None)
3812 try:
3863 try:
3813 for name in allfilters:
3864 for name in allfilters:
3814 printname = name
3865 printname = name
3815 if name is None:
3866 if name is None:
3816 printname = b'unfiltered'
3867 printname = b'unfiltered'
3817 timer(getbranchmap(name), title=printname)
3868 timer(getbranchmap(name), title=printname)
3818 finally:
3869 finally:
3819 branchcacheread.restore()
3870 branchcacheread.restore()
3820 branchcachewrite.restore()
3871 branchcachewrite.restore()
3821 fm.end()
3872 fm.end()
3822
3873
3823
3874
3824 @command(
3875 @command(
3825 b'perf::branchmapupdate|perfbranchmapupdate',
3876 b'perf::branchmapupdate|perfbranchmapupdate',
3826 [
3877 [
3827 (b'', b'base', [], b'subset of revision to start from'),
3878 (b'', b'base', [], b'subset of revision to start from'),
3828 (b'', b'target', [], b'subset of revision to end with'),
3879 (b'', b'target', [], b'subset of revision to end with'),
3829 (b'', b'clear-caches', False, b'clear cache between each runs'),
3880 (b'', b'clear-caches', False, b'clear cache between each runs'),
3830 ]
3881 ]
3831 + formatteropts,
3882 + formatteropts,
3832 )
3883 )
3833 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3884 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3834 """benchmark branchmap update from for <base> revs to <target> revs
3885 """benchmark branchmap update from for <base> revs to <target> revs
3835
3886
3836 If `--clear-caches` is passed, the following items will be reset before
3887 If `--clear-caches` is passed, the following items will be reset before
3837 each update:
3888 each update:
3838 * the changelog instance and associated indexes
3889 * the changelog instance and associated indexes
3839 * the rev-branch-cache instance
3890 * the rev-branch-cache instance
3840
3891
3841 Examples:
3892 Examples:
3842
3893
3843 # update for the one last revision
3894 # update for the one last revision
3844 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3895 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3845
3896
3846 $ update for change coming with a new branch
3897 $ update for change coming with a new branch
3847 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3898 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3848 """
3899 """
3849 from mercurial import branchmap
3900 from mercurial import branchmap
3850 from mercurial import repoview
3901 from mercurial import repoview
3851
3902
3852 opts = _byteskwargs(opts)
3903 opts = _byteskwargs(opts)
3853 timer, fm = gettimer(ui, opts)
3904 timer, fm = gettimer(ui, opts)
3854 clearcaches = opts[b'clear_caches']
3905 clearcaches = opts[b'clear_caches']
3855 unfi = repo.unfiltered()
3906 unfi = repo.unfiltered()
3856 x = [None] # used to pass data between closure
3907 x = [None] # used to pass data between closure
3857
3908
3858 # we use a `list` here to avoid possible side effect from smartset
3909 # we use a `list` here to avoid possible side effect from smartset
3859 baserevs = list(scmutil.revrange(repo, base))
3910 baserevs = list(scmutil.revrange(repo, base))
3860 targetrevs = list(scmutil.revrange(repo, target))
3911 targetrevs = list(scmutil.revrange(repo, target))
3861 if not baserevs:
3912 if not baserevs:
3862 raise error.Abort(b'no revisions selected for --base')
3913 raise error.Abort(b'no revisions selected for --base')
3863 if not targetrevs:
3914 if not targetrevs:
3864 raise error.Abort(b'no revisions selected for --target')
3915 raise error.Abort(b'no revisions selected for --target')
3865
3916
3866 # make sure the target branchmap also contains the one in the base
3917 # make sure the target branchmap also contains the one in the base
3867 targetrevs = list(set(baserevs) | set(targetrevs))
3918 targetrevs = list(set(baserevs) | set(targetrevs))
3868 targetrevs.sort()
3919 targetrevs.sort()
3869
3920
3870 cl = repo.changelog
3921 cl = repo.changelog
3871 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3922 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3872 allbaserevs.sort()
3923 allbaserevs.sort()
3873 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3924 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3874
3925
3875 newrevs = list(alltargetrevs.difference(allbaserevs))
3926 newrevs = list(alltargetrevs.difference(allbaserevs))
3876 newrevs.sort()
3927 newrevs.sort()
3877
3928
3878 allrevs = frozenset(unfi.changelog.revs())
3929 allrevs = frozenset(unfi.changelog.revs())
3879 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3930 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3880 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3931 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3881
3932
3882 def basefilter(repo, visibilityexceptions=None):
3933 def basefilter(repo, visibilityexceptions=None):
3883 return basefilterrevs
3934 return basefilterrevs
3884
3935
3885 def targetfilter(repo, visibilityexceptions=None):
3936 def targetfilter(repo, visibilityexceptions=None):
3886 return targetfilterrevs
3937 return targetfilterrevs
3887
3938
3888 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3939 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3889 ui.status(msg % (len(allbaserevs), len(newrevs)))
3940 ui.status(msg % (len(allbaserevs), len(newrevs)))
3890 if targetfilterrevs:
3941 if targetfilterrevs:
3891 msg = b'(%d revisions still filtered)\n'
3942 msg = b'(%d revisions still filtered)\n'
3892 ui.status(msg % len(targetfilterrevs))
3943 ui.status(msg % len(targetfilterrevs))
3893
3944
3894 try:
3945 try:
3895 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3946 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3896 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3947 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3897
3948
3898 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3949 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3899 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3950 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3900
3951
3901 # try to find an existing branchmap to reuse
3952 # try to find an existing branchmap to reuse
3902 subsettable = getbranchmapsubsettable()
3953 subsettable = getbranchmapsubsettable()
3903 candidatefilter = subsettable.get(None)
3954 candidatefilter = subsettable.get(None)
3904 while candidatefilter is not None:
3955 while candidatefilter is not None:
3905 candidatebm = repo.filtered(candidatefilter).branchmap()
3956 candidatebm = repo.filtered(candidatefilter).branchmap()
3906 if candidatebm.validfor(baserepo):
3957 if candidatebm.validfor(baserepo):
3907 filtered = repoview.filterrevs(repo, candidatefilter)
3958 filtered = repoview.filterrevs(repo, candidatefilter)
3908 missing = [r for r in allbaserevs if r in filtered]
3959 missing = [r for r in allbaserevs if r in filtered]
3909 base = candidatebm.copy()
3960 base = candidatebm.copy()
3910 base.update(baserepo, missing)
3961 base.update(baserepo, missing)
3911 break
3962 break
3912 candidatefilter = subsettable.get(candidatefilter)
3963 candidatefilter = subsettable.get(candidatefilter)
3913 else:
3964 else:
3914 # no suitable subset where found
3965 # no suitable subset where found
3915 base = branchmap.branchcache()
3966 base = branchmap.branchcache()
3916 base.update(baserepo, allbaserevs)
3967 base.update(baserepo, allbaserevs)
3917
3968
3918 def setup():
3969 def setup():
3919 x[0] = base.copy()
3970 x[0] = base.copy()
3920 if clearcaches:
3971 if clearcaches:
3921 unfi._revbranchcache = None
3972 unfi._revbranchcache = None
3922 clearchangelog(repo)
3973 clearchangelog(repo)
3923
3974
3924 def bench():
3975 def bench():
3925 x[0].update(targetrepo, newrevs)
3976 x[0].update(targetrepo, newrevs)
3926
3977
3927 timer(bench, setup=setup)
3978 timer(bench, setup=setup)
3928 fm.end()
3979 fm.end()
3929 finally:
3980 finally:
3930 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3981 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3931 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3982 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3932
3983
3933
3984
3934 @command(
3985 @command(
3935 b'perf::branchmapload|perfbranchmapload',
3986 b'perf::branchmapload|perfbranchmapload',
3936 [
3987 [
3937 (b'f', b'filter', b'', b'Specify repoview filter'),
3988 (b'f', b'filter', b'', b'Specify repoview filter'),
3938 (b'', b'list', False, b'List brachmap filter caches'),
3989 (b'', b'list', False, b'List brachmap filter caches'),
3939 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3990 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3940 ]
3991 ]
3941 + formatteropts,
3992 + formatteropts,
3942 )
3993 )
3943 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3994 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3944 """benchmark reading the branchmap"""
3995 """benchmark reading the branchmap"""
3945 opts = _byteskwargs(opts)
3996 opts = _byteskwargs(opts)
3946 clearrevlogs = opts[b'clear_revlogs']
3997 clearrevlogs = opts[b'clear_revlogs']
3947
3998
3948 if list:
3999 if list:
3949 for name, kind, st in repo.cachevfs.readdir(stat=True):
4000 for name, kind, st in repo.cachevfs.readdir(stat=True):
3950 if name.startswith(b'branch2'):
4001 if name.startswith(b'branch2'):
3951 filtername = name.partition(b'-')[2] or b'unfiltered'
4002 filtername = name.partition(b'-')[2] or b'unfiltered'
3952 ui.status(
4003 ui.status(
3953 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4004 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3954 )
4005 )
3955 return
4006 return
3956 if not filter:
4007 if not filter:
3957 filter = None
4008 filter = None
3958 subsettable = getbranchmapsubsettable()
4009 subsettable = getbranchmapsubsettable()
3959 if filter is None:
4010 if filter is None:
3960 repo = repo.unfiltered()
4011 repo = repo.unfiltered()
3961 else:
4012 else:
3962 repo = repoview.repoview(repo, filter)
4013 repo = repoview.repoview(repo, filter)
3963
4014
3964 repo.branchmap() # make sure we have a relevant, up to date branchmap
4015 repo.branchmap() # make sure we have a relevant, up to date branchmap
3965
4016
3966 try:
4017 try:
3967 fromfile = branchmap.branchcache.fromfile
4018 fromfile = branchmap.branchcache.fromfile
3968 except AttributeError:
4019 except AttributeError:
3969 # older versions
4020 # older versions
3970 fromfile = branchmap.read
4021 fromfile = branchmap.read
3971
4022
3972 currentfilter = filter
4023 currentfilter = filter
3973 # try once without timer, the filter may not be cached
4024 # try once without timer, the filter may not be cached
3974 while fromfile(repo) is None:
4025 while fromfile(repo) is None:
3975 currentfilter = subsettable.get(currentfilter)
4026 currentfilter = subsettable.get(currentfilter)
3976 if currentfilter is None:
4027 if currentfilter is None:
3977 raise error.Abort(
4028 raise error.Abort(
3978 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4029 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3979 )
4030 )
3980 repo = repo.filtered(currentfilter)
4031 repo = repo.filtered(currentfilter)
3981 timer, fm = gettimer(ui, opts)
4032 timer, fm = gettimer(ui, opts)
3982
4033
3983 def setup():
4034 def setup():
3984 if clearrevlogs:
4035 if clearrevlogs:
3985 clearchangelog(repo)
4036 clearchangelog(repo)
3986
4037
3987 def bench():
4038 def bench():
3988 fromfile(repo)
4039 fromfile(repo)
3989
4040
3990 timer(bench, setup=setup)
4041 timer(bench, setup=setup)
3991 fm.end()
4042 fm.end()
3992
4043
3993
4044
3994 @command(b'perf::loadmarkers|perfloadmarkers')
4045 @command(b'perf::loadmarkers|perfloadmarkers')
3995 def perfloadmarkers(ui, repo):
4046 def perfloadmarkers(ui, repo):
3996 """benchmark the time to parse the on-disk markers for a repo
4047 """benchmark the time to parse the on-disk markers for a repo
3997
4048
3998 Result is the number of markers in the repo."""
4049 Result is the number of markers in the repo."""
3999 timer, fm = gettimer(ui)
4050 timer, fm = gettimer(ui)
4000 svfs = getsvfs(repo)
4051 svfs = getsvfs(repo)
4001 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4052 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4002 fm.end()
4053 fm.end()
4003
4054
4004
4055
4005 @command(
4056 @command(
4006 b'perf::lrucachedict|perflrucachedict',
4057 b'perf::lrucachedict|perflrucachedict',
4007 formatteropts
4058 formatteropts
4008 + [
4059 + [
4009 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4060 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4010 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4061 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4011 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4062 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4012 (b'', b'size', 4, b'size of cache'),
4063 (b'', b'size', 4, b'size of cache'),
4013 (b'', b'gets', 10000, b'number of key lookups'),
4064 (b'', b'gets', 10000, b'number of key lookups'),
4014 (b'', b'sets', 10000, b'number of key sets'),
4065 (b'', b'sets', 10000, b'number of key sets'),
4015 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4066 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4016 (
4067 (
4017 b'',
4068 b'',
4018 b'mixedgetfreq',
4069 b'mixedgetfreq',
4019 50,
4070 50,
4020 b'frequency of get vs set ops in mixed mode',
4071 b'frequency of get vs set ops in mixed mode',
4021 ),
4072 ),
4022 ],
4073 ],
4023 norepo=True,
4074 norepo=True,
4024 )
4075 )
4025 def perflrucache(
4076 def perflrucache(
4026 ui,
4077 ui,
4027 mincost=0,
4078 mincost=0,
4028 maxcost=100,
4079 maxcost=100,
4029 costlimit=0,
4080 costlimit=0,
4030 size=4,
4081 size=4,
4031 gets=10000,
4082 gets=10000,
4032 sets=10000,
4083 sets=10000,
4033 mixed=10000,
4084 mixed=10000,
4034 mixedgetfreq=50,
4085 mixedgetfreq=50,
4035 **opts
4086 **opts
4036 ):
4087 ):
4037 opts = _byteskwargs(opts)
4088 opts = _byteskwargs(opts)
4038
4089
4039 def doinit():
4090 def doinit():
4040 for i in _xrange(10000):
4091 for i in _xrange(10000):
4041 util.lrucachedict(size)
4092 util.lrucachedict(size)
4042
4093
4043 costrange = list(range(mincost, maxcost + 1))
4094 costrange = list(range(mincost, maxcost + 1))
4044
4095
4045 values = []
4096 values = []
4046 for i in _xrange(size):
4097 for i in _xrange(size):
4047 values.append(random.randint(0, _maxint))
4098 values.append(random.randint(0, _maxint))
4048
4099
4049 # Get mode fills the cache and tests raw lookup performance with no
4100 # Get mode fills the cache and tests raw lookup performance with no
4050 # eviction.
4101 # eviction.
4051 getseq = []
4102 getseq = []
4052 for i in _xrange(gets):
4103 for i in _xrange(gets):
4053 getseq.append(random.choice(values))
4104 getseq.append(random.choice(values))
4054
4105
4055 def dogets():
4106 def dogets():
4056 d = util.lrucachedict(size)
4107 d = util.lrucachedict(size)
4057 for v in values:
4108 for v in values:
4058 d[v] = v
4109 d[v] = v
4059 for key in getseq:
4110 for key in getseq:
4060 value = d[key]
4111 value = d[key]
4061 value # silence pyflakes warning
4112 value # silence pyflakes warning
4062
4113
4063 def dogetscost():
4114 def dogetscost():
4064 d = util.lrucachedict(size, maxcost=costlimit)
4115 d = util.lrucachedict(size, maxcost=costlimit)
4065 for i, v in enumerate(values):
4116 for i, v in enumerate(values):
4066 d.insert(v, v, cost=costs[i])
4117 d.insert(v, v, cost=costs[i])
4067 for key in getseq:
4118 for key in getseq:
4068 try:
4119 try:
4069 value = d[key]
4120 value = d[key]
4070 value # silence pyflakes warning
4121 value # silence pyflakes warning
4071 except KeyError:
4122 except KeyError:
4072 pass
4123 pass
4073
4124
4074 # Set mode tests insertion speed with cache eviction.
4125 # Set mode tests insertion speed with cache eviction.
4075 setseq = []
4126 setseq = []
4076 costs = []
4127 costs = []
4077 for i in _xrange(sets):
4128 for i in _xrange(sets):
4078 setseq.append(random.randint(0, _maxint))
4129 setseq.append(random.randint(0, _maxint))
4079 costs.append(random.choice(costrange))
4130 costs.append(random.choice(costrange))
4080
4131
4081 def doinserts():
4132 def doinserts():
4082 d = util.lrucachedict(size)
4133 d = util.lrucachedict(size)
4083 for v in setseq:
4134 for v in setseq:
4084 d.insert(v, v)
4135 d.insert(v, v)
4085
4136
4086 def doinsertscost():
4137 def doinsertscost():
4087 d = util.lrucachedict(size, maxcost=costlimit)
4138 d = util.lrucachedict(size, maxcost=costlimit)
4088 for i, v in enumerate(setseq):
4139 for i, v in enumerate(setseq):
4089 d.insert(v, v, cost=costs[i])
4140 d.insert(v, v, cost=costs[i])
4090
4141
4091 def dosets():
4142 def dosets():
4092 d = util.lrucachedict(size)
4143 d = util.lrucachedict(size)
4093 for v in setseq:
4144 for v in setseq:
4094 d[v] = v
4145 d[v] = v
4095
4146
4096 # Mixed mode randomly performs gets and sets with eviction.
4147 # Mixed mode randomly performs gets and sets with eviction.
4097 mixedops = []
4148 mixedops = []
4098 for i in _xrange(mixed):
4149 for i in _xrange(mixed):
4099 r = random.randint(0, 100)
4150 r = random.randint(0, 100)
4100 if r < mixedgetfreq:
4151 if r < mixedgetfreq:
4101 op = 0
4152 op = 0
4102 else:
4153 else:
4103 op = 1
4154 op = 1
4104
4155
4105 mixedops.append(
4156 mixedops.append(
4106 (op, random.randint(0, size * 2), random.choice(costrange))
4157 (op, random.randint(0, size * 2), random.choice(costrange))
4107 )
4158 )
4108
4159
4109 def domixed():
4160 def domixed():
4110 d = util.lrucachedict(size)
4161 d = util.lrucachedict(size)
4111
4162
4112 for op, v, cost in mixedops:
4163 for op, v, cost in mixedops:
4113 if op == 0:
4164 if op == 0:
4114 try:
4165 try:
4115 d[v]
4166 d[v]
4116 except KeyError:
4167 except KeyError:
4117 pass
4168 pass
4118 else:
4169 else:
4119 d[v] = v
4170 d[v] = v
4120
4171
4121 def domixedcost():
4172 def domixedcost():
4122 d = util.lrucachedict(size, maxcost=costlimit)
4173 d = util.lrucachedict(size, maxcost=costlimit)
4123
4174
4124 for op, v, cost in mixedops:
4175 for op, v, cost in mixedops:
4125 if op == 0:
4176 if op == 0:
4126 try:
4177 try:
4127 d[v]
4178 d[v]
4128 except KeyError:
4179 except KeyError:
4129 pass
4180 pass
4130 else:
4181 else:
4131 d.insert(v, v, cost=cost)
4182 d.insert(v, v, cost=cost)
4132
4183
4133 benches = [
4184 benches = [
4134 (doinit, b'init'),
4185 (doinit, b'init'),
4135 ]
4186 ]
4136
4187
4137 if costlimit:
4188 if costlimit:
4138 benches.extend(
4189 benches.extend(
4139 [
4190 [
4140 (dogetscost, b'gets w/ cost limit'),
4191 (dogetscost, b'gets w/ cost limit'),
4141 (doinsertscost, b'inserts w/ cost limit'),
4192 (doinsertscost, b'inserts w/ cost limit'),
4142 (domixedcost, b'mixed w/ cost limit'),
4193 (domixedcost, b'mixed w/ cost limit'),
4143 ]
4194 ]
4144 )
4195 )
4145 else:
4196 else:
4146 benches.extend(
4197 benches.extend(
4147 [
4198 [
4148 (dogets, b'gets'),
4199 (dogets, b'gets'),
4149 (doinserts, b'inserts'),
4200 (doinserts, b'inserts'),
4150 (dosets, b'sets'),
4201 (dosets, b'sets'),
4151 (domixed, b'mixed'),
4202 (domixed, b'mixed'),
4152 ]
4203 ]
4153 )
4204 )
4154
4205
4155 for fn, title in benches:
4206 for fn, title in benches:
4156 timer, fm = gettimer(ui, opts)
4207 timer, fm = gettimer(ui, opts)
4157 timer(fn, title=title)
4208 timer(fn, title=title)
4158 fm.end()
4209 fm.end()
4159
4210
4160
4211
4161 @command(
4212 @command(
4162 b'perf::write|perfwrite',
4213 b'perf::write|perfwrite',
4163 formatteropts
4214 formatteropts
4164 + [
4215 + [
4165 (b'', b'write-method', b'write', b'ui write method'),
4216 (b'', b'write-method', b'write', b'ui write method'),
4166 (b'', b'nlines', 100, b'number of lines'),
4217 (b'', b'nlines', 100, b'number of lines'),
4167 (b'', b'nitems', 100, b'number of items (per line)'),
4218 (b'', b'nitems', 100, b'number of items (per line)'),
4168 (b'', b'item', b'x', b'item that is written'),
4219 (b'', b'item', b'x', b'item that is written'),
4169 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4220 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4170 (b'', b'flush-line', None, b'flush after each line'),
4221 (b'', b'flush-line', None, b'flush after each line'),
4171 ],
4222 ],
4172 )
4223 )
4173 def perfwrite(ui, repo, **opts):
4224 def perfwrite(ui, repo, **opts):
4174 """microbenchmark ui.write (and others)"""
4225 """microbenchmark ui.write (and others)"""
4175 opts = _byteskwargs(opts)
4226 opts = _byteskwargs(opts)
4176
4227
4177 write = getattr(ui, _sysstr(opts[b'write_method']))
4228 write = getattr(ui, _sysstr(opts[b'write_method']))
4178 nlines = int(opts[b'nlines'])
4229 nlines = int(opts[b'nlines'])
4179 nitems = int(opts[b'nitems'])
4230 nitems = int(opts[b'nitems'])
4180 item = opts[b'item']
4231 item = opts[b'item']
4181 batch_line = opts.get(b'batch_line')
4232 batch_line = opts.get(b'batch_line')
4182 flush_line = opts.get(b'flush_line')
4233 flush_line = opts.get(b'flush_line')
4183
4234
4184 if batch_line:
4235 if batch_line:
4185 line = item * nitems + b'\n'
4236 line = item * nitems + b'\n'
4186
4237
4187 def benchmark():
4238 def benchmark():
4188 for i in pycompat.xrange(nlines):
4239 for i in pycompat.xrange(nlines):
4189 if batch_line:
4240 if batch_line:
4190 write(line)
4241 write(line)
4191 else:
4242 else:
4192 for i in pycompat.xrange(nitems):
4243 for i in pycompat.xrange(nitems):
4193 write(item)
4244 write(item)
4194 write(b'\n')
4245 write(b'\n')
4195 if flush_line:
4246 if flush_line:
4196 ui.flush()
4247 ui.flush()
4197 ui.flush()
4248 ui.flush()
4198
4249
4199 timer, fm = gettimer(ui, opts)
4250 timer, fm = gettimer(ui, opts)
4200 timer(benchmark)
4251 timer(benchmark)
4201 fm.end()
4252 fm.end()
4202
4253
4203
4254
4204 def uisetup(ui):
4255 def uisetup(ui):
4205 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4256 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4206 commands, b'debugrevlogopts'
4257 commands, b'debugrevlogopts'
4207 ):
4258 ):
4208 # for "historical portability":
4259 # for "historical portability":
4209 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4260 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4210 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4261 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4211 # openrevlog() should cause failure, because it has been
4262 # openrevlog() should cause failure, because it has been
4212 # available since 3.5 (or 49c583ca48c4).
4263 # available since 3.5 (or 49c583ca48c4).
4213 def openrevlog(orig, repo, cmd, file_, opts):
4264 def openrevlog(orig, repo, cmd, file_, opts):
4214 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4265 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4215 raise error.Abort(
4266 raise error.Abort(
4216 b"This version doesn't support --dir option",
4267 b"This version doesn't support --dir option",
4217 hint=b"use 3.5 or later",
4268 hint=b"use 3.5 or later",
4218 )
4269 )
4219 return orig(repo, cmd, file_, opts)
4270 return orig(repo, cmd, file_, opts)
4220
4271
4221 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4272 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4222
4273
4223
4274
4224 @command(
4275 @command(
4225 b'perf::progress|perfprogress',
4276 b'perf::progress|perfprogress',
4226 formatteropts
4277 formatteropts
4227 + [
4278 + [
4228 (b'', b'topic', b'topic', b'topic for progress messages'),
4279 (b'', b'topic', b'topic', b'topic for progress messages'),
4229 (b'c', b'total', 1000000, b'total value we are progressing to'),
4280 (b'c', b'total', 1000000, b'total value we are progressing to'),
4230 ],
4281 ],
4231 norepo=True,
4282 norepo=True,
4232 )
4283 )
4233 def perfprogress(ui, topic=None, total=None, **opts):
4284 def perfprogress(ui, topic=None, total=None, **opts):
4234 """printing of progress bars"""
4285 """printing of progress bars"""
4235 opts = _byteskwargs(opts)
4286 opts = _byteskwargs(opts)
4236
4287
4237 timer, fm = gettimer(ui, opts)
4288 timer, fm = gettimer(ui, opts)
4238
4289
4239 def doprogress():
4290 def doprogress():
4240 with ui.makeprogress(topic, total=total) as progress:
4291 with ui.makeprogress(topic, total=total) as progress:
4241 for i in _xrange(total):
4292 for i in _xrange(total):
4242 progress.increment()
4293 progress.increment()
4243
4294
4244 timer(doprogress)
4295 timer(doprogress)
4245 fm.end()
4296 fm.end()
@@ -1,435 +1,437 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perf::addremove
81 perf::addremove
82 (no help text available)
82 (no help text available)
83 perf::ancestors
83 perf::ancestors
84 (no help text available)
84 (no help text available)
85 perf::ancestorset
85 perf::ancestorset
86 (no help text available)
86 (no help text available)
87 perf::annotate
87 perf::annotate
88 (no help text available)
88 (no help text available)
89 perf::bdiff benchmark a bdiff between revisions
89 perf::bdiff benchmark a bdiff between revisions
90 perf::bookmarks
90 perf::bookmarks
91 benchmark parsing bookmarks from disk to memory
91 benchmark parsing bookmarks from disk to memory
92 perf::branchmap
92 perf::branchmap
93 benchmark the update of a branchmap
93 benchmark the update of a branchmap
94 perf::branchmapload
94 perf::branchmapload
95 benchmark reading the branchmap
95 benchmark reading the branchmap
96 perf::branchmapupdate
96 perf::branchmapupdate
97 benchmark branchmap update from for <base> revs to <target>
97 benchmark branchmap update from for <base> revs to <target>
98 revs
98 revs
99 perf::bundle benchmark the creation of a bundle from a repository
99 perf::bundle benchmark the creation of a bundle from a repository
100 perf::bundleread
100 perf::bundleread
101 Benchmark reading of bundle files.
101 Benchmark reading of bundle files.
102 perf::cca (no help text available)
102 perf::cca (no help text available)
103 perf::changegroupchangelog
103 perf::changegroupchangelog
104 Benchmark producing a changelog group for a changegroup.
104 Benchmark producing a changelog group for a changegroup.
105 perf::changeset
105 perf::changeset
106 (no help text available)
106 (no help text available)
107 perf::ctxfiles
107 perf::ctxfiles
108 (no help text available)
108 (no help text available)
109 perf::delta-find
109 perf::delta-find
110 benchmark the process of finding a valid delta for a revlog
110 benchmark the process of finding a valid delta for a revlog
111 revision
111 revision
112 perf::diffwd Profile diff of working directory changes
112 perf::diffwd Profile diff of working directory changes
113 perf::dirfoldmap
113 perf::dirfoldmap
114 benchmap a 'dirstate._map.dirfoldmap.get()' request
114 benchmap a 'dirstate._map.dirfoldmap.get()' request
115 perf::dirs (no help text available)
115 perf::dirs (no help text available)
116 perf::dirstate
116 perf::dirstate
117 benchmap the time of various distate operations
117 benchmap the time of various distate operations
118 perf::dirstatedirs
118 perf::dirstatedirs
119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
120 perf::dirstatefoldmap
120 perf::dirstatefoldmap
121 benchmap a 'dirstate._map.filefoldmap.get()' request
121 benchmap a 'dirstate._map.filefoldmap.get()' request
122 perf::dirstatewrite
122 perf::dirstatewrite
123 benchmap the time it take to write a dirstate on disk
123 benchmap the time it take to write a dirstate on disk
124 perf::discovery
124 perf::discovery
125 benchmark discovery between local repo and the peer at given
125 benchmark discovery between local repo and the peer at given
126 path
126 path
127 perf::fncacheencode
127 perf::fncacheencode
128 (no help text available)
128 (no help text available)
129 perf::fncacheload
129 perf::fncacheload
130 (no help text available)
130 (no help text available)
131 perf::fncachewrite
131 perf::fncachewrite
132 (no help text available)
132 (no help text available)
133 perf::heads benchmark the computation of a changelog heads
133 perf::heads benchmark the computation of a changelog heads
134 perf::helper-mergecopies
134 perf::helper-mergecopies
135 find statistics about potential parameters for
135 find statistics about potential parameters for
136 'perfmergecopies'
136 'perfmergecopies'
137 perf::helper-pathcopies
137 perf::helper-pathcopies
138 find statistic about potential parameters for the
138 find statistic about potential parameters for the
139 'perftracecopies'
139 'perftracecopies'
140 perf::ignore benchmark operation related to computing ignore
140 perf::ignore benchmark operation related to computing ignore
141 perf::index benchmark index creation time followed by a lookup
141 perf::index benchmark index creation time followed by a lookup
142 perf::linelogedits
142 perf::linelogedits
143 (no help text available)
143 (no help text available)
144 perf::loadmarkers
144 perf::loadmarkers
145 benchmark the time to parse the on-disk markers for a repo
145 benchmark the time to parse the on-disk markers for a repo
146 perf::log (no help text available)
146 perf::log (no help text available)
147 perf::lookup (no help text available)
147 perf::lookup (no help text available)
148 perf::lrucachedict
148 perf::lrucachedict
149 (no help text available)
149 (no help text available)
150 perf::manifest
150 perf::manifest
151 benchmark the time to read a manifest from disk and return a
151 benchmark the time to read a manifest from disk and return a
152 usable
152 usable
153 perf::mergecalculate
153 perf::mergecalculate
154 (no help text available)
154 (no help text available)
155 perf::mergecopies
155 perf::mergecopies
156 measure runtime of 'copies.mergecopies'
156 measure runtime of 'copies.mergecopies'
157 perf::moonwalk
157 perf::moonwalk
158 benchmark walking the changelog backwards
158 benchmark walking the changelog backwards
159 perf::nodelookup
159 perf::nodelookup
160 (no help text available)
160 (no help text available)
161 perf::nodemap
161 perf::nodemap
162 benchmark the time necessary to look up revision from a cold
162 benchmark the time necessary to look up revision from a cold
163 nodemap
163 nodemap
164 perf::parents
164 perf::parents
165 benchmark the time necessary to fetch one changeset's parents.
165 benchmark the time necessary to fetch one changeset's parents.
166 perf::pathcopies
166 perf::pathcopies
167 benchmark the copy tracing logic
167 benchmark the copy tracing logic
168 perf::phases benchmark phasesets computation
168 perf::phases benchmark phasesets computation
169 perf::phasesremote
169 perf::phasesremote
170 benchmark time needed to analyse phases of the remote server
170 benchmark time needed to analyse phases of the remote server
171 perf::progress
171 perf::progress
172 printing of progress bars
172 printing of progress bars
173 perf::rawfiles
173 perf::rawfiles
174 (no help text available)
174 (no help text available)
175 perf::revlogchunks
175 perf::revlogchunks
176 Benchmark operations on revlog chunks.
176 Benchmark operations on revlog chunks.
177 perf::revlogindex
177 perf::revlogindex
178 Benchmark operations against a revlog index.
178 Benchmark operations against a revlog index.
179 perf::revlogrevision
179 perf::revlogrevision
180 Benchmark obtaining a revlog revision.
180 Benchmark obtaining a revlog revision.
181 perf::revlogrevisions
181 perf::revlogrevisions
182 Benchmark reading a series of revisions from a revlog.
182 Benchmark reading a series of revisions from a revlog.
183 perf::revlogwrite
183 perf::revlogwrite
184 Benchmark writing a series of revisions to a revlog.
184 Benchmark writing a series of revisions to a revlog.
185 perf::revrange
185 perf::revrange
186 (no help text available)
186 (no help text available)
187 perf::revset benchmark the execution time of a revset
187 perf::revset benchmark the execution time of a revset
188 perf::startup
188 perf::startup
189 (no help text available)
189 (no help text available)
190 perf::status benchmark the performance of a single status call
190 perf::status benchmark the performance of a single status call
191 perf::stream-locked-section
192 benchmark the initial, repo-locked, section of a stream-clone
191 perf::tags (no help text available)
193 perf::tags (no help text available)
192 perf::templating
194 perf::templating
193 test the rendering time of a given template
195 test the rendering time of a given template
194 perf::unbundle
196 perf::unbundle
195 benchmark application of a bundle in a repository.
197 benchmark application of a bundle in a repository.
196 perf::unidiff
198 perf::unidiff
197 benchmark a unified diff between revisions
199 benchmark a unified diff between revisions
198 perf::volatilesets
200 perf::volatilesets
199 benchmark the computation of various volatile set
201 benchmark the computation of various volatile set
200 perf::walk (no help text available)
202 perf::walk (no help text available)
201 perf::write microbenchmark ui.write (and others)
203 perf::write microbenchmark ui.write (and others)
202
204
203 (use 'hg help -v perf' to show built-in aliases and global options)
205 (use 'hg help -v perf' to show built-in aliases and global options)
204
206
205 $ hg help perfaddremove
207 $ hg help perfaddremove
206 hg perf::addremove
208 hg perf::addremove
207
209
208 aliases: perfaddremove
210 aliases: perfaddremove
209
211
210 (no help text available)
212 (no help text available)
211
213
212 options:
214 options:
213
215
214 -T --template TEMPLATE display with template
216 -T --template TEMPLATE display with template
215
217
216 (some details hidden, use --verbose to show complete help)
218 (some details hidden, use --verbose to show complete help)
217
219
218 $ hg perfaddremove
220 $ hg perfaddremove
219 $ hg perfancestors
221 $ hg perfancestors
220 $ hg perfancestorset 2
222 $ hg perfancestorset 2
221 $ hg perfannotate a
223 $ hg perfannotate a
222 $ hg perfbdiff -c 1
224 $ hg perfbdiff -c 1
223 $ hg perfbdiff --alldata 1
225 $ hg perfbdiff --alldata 1
224 $ hg perfunidiff -c 1
226 $ hg perfunidiff -c 1
225 $ hg perfunidiff --alldata 1
227 $ hg perfunidiff --alldata 1
226 $ hg perfbookmarks
228 $ hg perfbookmarks
227 $ hg perfbranchmap
229 $ hg perfbranchmap
228 $ hg perfbranchmapload
230 $ hg perfbranchmapload
229 $ hg perfbranchmapupdate --base "not tip" --target "tip"
231 $ hg perfbranchmapupdate --base "not tip" --target "tip"
230 benchmark of branchmap with 3 revisions with 1 new ones
232 benchmark of branchmap with 3 revisions with 1 new ones
231 $ hg perfcca
233 $ hg perfcca
232 $ hg perfchangegroupchangelog
234 $ hg perfchangegroupchangelog
233 $ hg perfchangegroupchangelog --cgversion 01
235 $ hg perfchangegroupchangelog --cgversion 01
234 $ hg perfchangeset 2
236 $ hg perfchangeset 2
235 $ hg perfctxfiles 2
237 $ hg perfctxfiles 2
236 $ hg perfdiffwd
238 $ hg perfdiffwd
237 $ hg perfdirfoldmap
239 $ hg perfdirfoldmap
238 $ hg perfdirs
240 $ hg perfdirs
239 $ hg perfdirstate
241 $ hg perfdirstate
240 $ hg perfdirstate --contains
242 $ hg perfdirstate --contains
241 $ hg perfdirstate --iteration
243 $ hg perfdirstate --iteration
242 $ hg perfdirstatedirs
244 $ hg perfdirstatedirs
243 $ hg perfdirstatefoldmap
245 $ hg perfdirstatefoldmap
244 $ hg perfdirstatewrite
246 $ hg perfdirstatewrite
245 #if repofncache
247 #if repofncache
246 $ hg perffncacheencode
248 $ hg perffncacheencode
247 $ hg perffncacheload
249 $ hg perffncacheload
248 $ hg debugrebuildfncache
250 $ hg debugrebuildfncache
249 fncache already up to date
251 fncache already up to date
250 $ hg perffncachewrite
252 $ hg perffncachewrite
251 $ hg debugrebuildfncache
253 $ hg debugrebuildfncache
252 fncache already up to date
254 fncache already up to date
253 #endif
255 #endif
254 $ hg perfheads
256 $ hg perfheads
255 $ hg perfignore
257 $ hg perfignore
256 $ hg perfindex
258 $ hg perfindex
257 $ hg perflinelogedits -n 1
259 $ hg perflinelogedits -n 1
258 $ hg perfloadmarkers
260 $ hg perfloadmarkers
259 $ hg perflog
261 $ hg perflog
260 $ hg perflookup 2
262 $ hg perflookup 2
261 $ hg perflrucache
263 $ hg perflrucache
262 $ hg perfmanifest 2
264 $ hg perfmanifest 2
263 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
265 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
264 $ hg perfmanifest -m 44fe2c8352bb
266 $ hg perfmanifest -m 44fe2c8352bb
265 abort: manifest revision must be integer or full node
267 abort: manifest revision must be integer or full node
266 [255]
268 [255]
267 $ hg perfmergecalculate -r 3
269 $ hg perfmergecalculate -r 3
268 $ hg perfmoonwalk
270 $ hg perfmoonwalk
269 $ hg perfnodelookup 2
271 $ hg perfnodelookup 2
270 $ hg perfpathcopies 1 2
272 $ hg perfpathcopies 1 2
271 $ hg perfprogress --total 1000
273 $ hg perfprogress --total 1000
272 $ hg perfrawfiles 2
274 $ hg perfrawfiles 2
273 $ hg perfrevlogindex -c
275 $ hg perfrevlogindex -c
274 #if reporevlogstore
276 #if reporevlogstore
275 $ hg perfrevlogrevisions .hg/store/data/a.i
277 $ hg perfrevlogrevisions .hg/store/data/a.i
276 #endif
278 #endif
277 $ hg perfrevlogrevision -m 0
279 $ hg perfrevlogrevision -m 0
278 $ hg perfrevlogchunks -c
280 $ hg perfrevlogchunks -c
279 $ hg perfrevrange
281 $ hg perfrevrange
280 $ hg perfrevset 'all()'
282 $ hg perfrevset 'all()'
281 $ hg perfstartup
283 $ hg perfstartup
282 $ hg perfstatus
284 $ hg perfstatus
283 $ hg perfstatus --dirstate
285 $ hg perfstatus --dirstate
284 $ hg perftags
286 $ hg perftags
285 $ hg perftemplating
287 $ hg perftemplating
286 $ hg perfvolatilesets
288 $ hg perfvolatilesets
287 $ hg perfwalk
289 $ hg perfwalk
288 $ hg perfparents
290 $ hg perfparents
289 $ hg perfdiscovery -q .
291 $ hg perfdiscovery -q .
290
292
291 Test run control
293 Test run control
292 ----------------
294 ----------------
293
295
294 Simple single entry
296 Simple single entry
295
297
296 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
298 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
297 ! wall * comb * user * sys * (best of 15) (glob)
299 ! wall * comb * user * sys * (best of 15) (glob)
298
300
299 Multiple entries
301 Multiple entries
300
302
301 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
303 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
302 ! wall * comb * user * sys * (best of 5) (glob)
304 ! wall * comb * user * sys * (best of 5) (glob)
303
305
304 error case are ignored
306 error case are ignored
305
307
306 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
308 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
307 malformatted run limit entry, missing "-": 500
309 malformatted run limit entry, missing "-": 500
308 ! wall * comb * user * sys * (best of 5) (glob)
310 ! wall * comb * user * sys * (best of 5) (glob)
309 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
311 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
310 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
312 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
311 ! wall * comb * user * sys * (best of 5) (glob)
313 ! wall * comb * user * sys * (best of 5) (glob)
312 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
314 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
313 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
315 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
314 ! wall * comb * user * sys * (best of 5) (glob)
316 ! wall * comb * user * sys * (best of 5) (glob)
315
317
316 test actual output
318 test actual output
317 ------------------
319 ------------------
318
320
319 normal output:
321 normal output:
320
322
321 $ hg perfheads --config perf.stub=no
323 $ hg perfheads --config perf.stub=no
322 ! wall * comb * user * sys * (best of *) (glob)
324 ! wall * comb * user * sys * (best of *) (glob)
323
325
324 detailed output:
326 detailed output:
325
327
326 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
328 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
327 ! wall * comb * user * sys * (best of *) (glob)
329 ! wall * comb * user * sys * (best of *) (glob)
328 ! wall * comb * user * sys * (max of *) (glob)
330 ! wall * comb * user * sys * (max of *) (glob)
329 ! wall * comb * user * sys * (avg of *) (glob)
331 ! wall * comb * user * sys * (avg of *) (glob)
330 ! wall * comb * user * sys * (median of *) (glob)
332 ! wall * comb * user * sys * (median of *) (glob)
331
333
332 test json output
334 test json output
333 ----------------
335 ----------------
334
336
335 normal output:
337 normal output:
336
338
337 $ hg perfheads --template json --config perf.stub=no
339 $ hg perfheads --template json --config perf.stub=no
338 [
340 [
339 {
341 {
340 "comb": *, (glob)
342 "comb": *, (glob)
341 "count": *, (glob)
343 "count": *, (glob)
342 "sys": *, (glob)
344 "sys": *, (glob)
343 "user": *, (glob)
345 "user": *, (glob)
344 "wall": * (glob)
346 "wall": * (glob)
345 }
347 }
346 ]
348 ]
347
349
348 detailed output:
350 detailed output:
349
351
350 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
352 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
351 [
353 [
352 {
354 {
353 "avg.comb": *, (glob)
355 "avg.comb": *, (glob)
354 "avg.count": *, (glob)
356 "avg.count": *, (glob)
355 "avg.sys": *, (glob)
357 "avg.sys": *, (glob)
356 "avg.user": *, (glob)
358 "avg.user": *, (glob)
357 "avg.wall": *, (glob)
359 "avg.wall": *, (glob)
358 "comb": *, (glob)
360 "comb": *, (glob)
359 "count": *, (glob)
361 "count": *, (glob)
360 "max.comb": *, (glob)
362 "max.comb": *, (glob)
361 "max.count": *, (glob)
363 "max.count": *, (glob)
362 "max.sys": *, (glob)
364 "max.sys": *, (glob)
363 "max.user": *, (glob)
365 "max.user": *, (glob)
364 "max.wall": *, (glob)
366 "max.wall": *, (glob)
365 "median.comb": *, (glob)
367 "median.comb": *, (glob)
366 "median.count": *, (glob)
368 "median.count": *, (glob)
367 "median.sys": *, (glob)
369 "median.sys": *, (glob)
368 "median.user": *, (glob)
370 "median.user": *, (glob)
369 "median.wall": *, (glob)
371 "median.wall": *, (glob)
370 "sys": *, (glob)
372 "sys": *, (glob)
371 "user": *, (glob)
373 "user": *, (glob)
372 "wall": * (glob)
374 "wall": * (glob)
373 }
375 }
374 ]
376 ]
375
377
376 Test pre-run feature
378 Test pre-run feature
377 --------------------
379 --------------------
378
380
379 (perf discovery has some spurious output)
381 (perf discovery has some spurious output)
380
382
381 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
383 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
382 ! wall * comb * user * sys * (best of 1) (glob)
384 ! wall * comb * user * sys * (best of 1) (glob)
383 searching for changes
385 searching for changes
384 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
386 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
385 ! wall * comb * user * sys * (best of 1) (glob)
387 ! wall * comb * user * sys * (best of 1) (glob)
386 searching for changes
388 searching for changes
387 searching for changes
389 searching for changes
388 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
390 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
389 ! wall * comb * user * sys * (best of 1) (glob)
391 ! wall * comb * user * sys * (best of 1) (glob)
390 searching for changes
392 searching for changes
391 searching for changes
393 searching for changes
392 searching for changes
394 searching for changes
393 searching for changes
395 searching for changes
394 $ hg perf::bundle 'last(all(), 5)'
396 $ hg perf::bundle 'last(all(), 5)'
395 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
397 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
396 4 changesets found
398 4 changesets found
397 $ hg perf::unbundle last-5.hg
399 $ hg perf::unbundle last-5.hg
398
400
399
401
400 test profile-benchmark option
402 test profile-benchmark option
401 ------------------------------
403 ------------------------------
402
404
403 Function to check that statprof ran
405 Function to check that statprof ran
404 $ statprofran () {
406 $ statprofran () {
405 > egrep 'Sample count:|No samples recorded' > /dev/null
407 > egrep 'Sample count:|No samples recorded' > /dev/null
406 > }
408 > }
407 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
409 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
408
410
409 Check perf.py for historical portability
411 Check perf.py for historical portability
410 ----------------------------------------
412 ----------------------------------------
411
413
412 $ cd "$TESTDIR/.."
414 $ cd "$TESTDIR/.."
413
415
414 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
416 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
415 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
417 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
416 > "$TESTDIR"/check-perf-code.py contrib/perf.py
418 > "$TESTDIR"/check-perf-code.py contrib/perf.py
417 contrib/perf.py:\d+: (re)
419 contrib/perf.py:\d+: (re)
418 > from mercurial import (
420 > from mercurial import (
419 import newer module separately in try clause for early Mercurial
421 import newer module separately in try clause for early Mercurial
420 contrib/perf.py:\d+: (re)
422 contrib/perf.py:\d+: (re)
421 > from mercurial import (
423 > from mercurial import (
422 import newer module separately in try clause for early Mercurial
424 import newer module separately in try clause for early Mercurial
423 contrib/perf.py:\d+: (re)
425 contrib/perf.py:\d+: (re)
424 > origindexpath = orig.opener.join(indexfile)
426 > origindexpath = orig.opener.join(indexfile)
425 use getvfs()/getsvfs() for early Mercurial
427 use getvfs()/getsvfs() for early Mercurial
426 contrib/perf.py:\d+: (re)
428 contrib/perf.py:\d+: (re)
427 > origdatapath = orig.opener.join(datafile)
429 > origdatapath = orig.opener.join(datafile)
428 use getvfs()/getsvfs() for early Mercurial
430 use getvfs()/getsvfs() for early Mercurial
429 contrib/perf.py:\d+: (re)
431 contrib/perf.py:\d+: (re)
430 > vfs = vfsmod.vfs(tmpdir)
432 > vfs = vfsmod.vfs(tmpdir)
431 use getvfs()/getsvfs() for early Mercurial
433 use getvfs()/getsvfs() for early Mercurial
432 contrib/perf.py:\d+: (re)
434 contrib/perf.py:\d+: (re)
433 > vfs.options = getattr(orig.opener, 'options', None)
435 > vfs.options = getattr(orig.opener, 'options', None)
434 use getvfs()/getsvfs() for early Mercurial
436 use getvfs()/getsvfs() for early Mercurial
435 [1]
437 [1]
General Comments 0
You need to be logged in to leave comments. Login now