##// END OF EJS Templates
perf: start recording total time after warming...
marmoute -
r52484:ecf4b959 default
parent child Browse files
Show More
@@ -1,4706 +1,4706 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (by default, the first iteration is benchmarked)
23 (by default, the first iteration is benchmarked)
24
24
25 ``profiled-runs``
25 ``profiled-runs``
26 list of iterations to profile (starting from 0)
26 list of iterations to profile (starting from 0)
27
27
28 ``run-limits``
28 ``run-limits``
29 Control the number of runs each benchmark will perform. The option value
29 Control the number of runs each benchmark will perform. The option value
30 should be a list of `<time>-<numberofrun>` pairs. After each run the
30 should be a list of `<time>-<numberofrun>` pairs. After each run the
31 conditions are considered in order with the following logic:
31 conditions are considered in order with the following logic:
32
32
33 If benchmark has been running for <time> seconds, and we have performed
33 If benchmark has been running for <time> seconds, and we have performed
34 <numberofrun> iterations, stop the benchmark,
34 <numberofrun> iterations, stop the benchmark,
35
35
36 The default value is: `3.0-100, 10.0-3`
36 The default value is: `3.0-100, 10.0-3`
37
37
38 ``stub``
38 ``stub``
39 When set, benchmarks will only be run once, useful for testing
39 When set, benchmarks will only be run once, useful for testing
40 (default: off)
40 (default: off)
41 '''
41 '''
42
42
43 # "historical portability" policy of perf.py:
43 # "historical portability" policy of perf.py:
44 #
44 #
45 # We have to do:
45 # We have to do:
46 # - make perf.py "loadable" with as wide Mercurial version as possible
46 # - make perf.py "loadable" with as wide Mercurial version as possible
47 # This doesn't mean that perf commands work correctly with that Mercurial.
47 # This doesn't mean that perf commands work correctly with that Mercurial.
48 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
48 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
49 # - make historical perf command work correctly with as wide Mercurial
49 # - make historical perf command work correctly with as wide Mercurial
50 # version as possible
50 # version as possible
51 #
51 #
52 # We have to do, if possible with reasonable cost:
52 # We have to do, if possible with reasonable cost:
53 # - make recent perf command for historical feature work correctly
53 # - make recent perf command for historical feature work correctly
54 # with early Mercurial
54 # with early Mercurial
55 #
55 #
56 # We don't have to do:
56 # We don't have to do:
57 # - make perf command for recent feature work correctly with early
57 # - make perf command for recent feature work correctly with early
58 # Mercurial
58 # Mercurial
59
59
60 import contextlib
60 import contextlib
61 import functools
61 import functools
62 import gc
62 import gc
63 import os
63 import os
64 import random
64 import random
65 import shutil
65 import shutil
66 import struct
66 import struct
67 import sys
67 import sys
68 import tempfile
68 import tempfile
69 import threading
69 import threading
70 import time
70 import time
71
71
72 import mercurial.revlog
72 import mercurial.revlog
73 from mercurial import (
73 from mercurial import (
74 changegroup,
74 changegroup,
75 cmdutil,
75 cmdutil,
76 commands,
76 commands,
77 copies,
77 copies,
78 error,
78 error,
79 extensions,
79 extensions,
80 hg,
80 hg,
81 mdiff,
81 mdiff,
82 merge,
82 merge,
83 util,
83 util,
84 )
84 )
85
85
86 # for "historical portability":
86 # for "historical portability":
87 # try to import modules separately (in dict order), and ignore
87 # try to import modules separately (in dict order), and ignore
88 # failure, because these aren't available with early Mercurial
88 # failure, because these aren't available with early Mercurial
89 try:
89 try:
90 from mercurial import branchmap # since 2.5 (or bcee63733aad)
90 from mercurial import branchmap # since 2.5 (or bcee63733aad)
91 except ImportError:
91 except ImportError:
92 pass
92 pass
93 try:
93 try:
94 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
94 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
95 except ImportError:
95 except ImportError:
96 pass
96 pass
97 try:
97 try:
98 from mercurial import registrar # since 3.7 (or 37d50250b696)
98 from mercurial import registrar # since 3.7 (or 37d50250b696)
99
99
100 dir(registrar) # forcibly load it
100 dir(registrar) # forcibly load it
101 except ImportError:
101 except ImportError:
102 registrar = None
102 registrar = None
103 try:
103 try:
104 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
104 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
105 except ImportError:
105 except ImportError:
106 pass
106 pass
107 try:
107 try:
108 from mercurial.utils import repoviewutil # since 5.0
108 from mercurial.utils import repoviewutil # since 5.0
109 except ImportError:
109 except ImportError:
110 repoviewutil = None
110 repoviewutil = None
111 try:
111 try:
112 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
112 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115 try:
115 try:
116 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
116 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
117 except ImportError:
117 except ImportError:
118 pass
118 pass
119
119
120 try:
120 try:
121 from mercurial import profiling
121 from mercurial import profiling
122 except ImportError:
122 except ImportError:
123 profiling = None
123 profiling = None
124
124
125 try:
125 try:
126 from mercurial.revlogutils import constants as revlog_constants
126 from mercurial.revlogutils import constants as revlog_constants
127
127
128 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
128 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
129
129
130 def revlog(opener, *args, **kwargs):
130 def revlog(opener, *args, **kwargs):
131 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
131 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
132
132
133
133
134 except (ImportError, AttributeError):
134 except (ImportError, AttributeError):
135 perf_rl_kind = None
135 perf_rl_kind = None
136
136
137 def revlog(opener, *args, **kwargs):
137 def revlog(opener, *args, **kwargs):
138 return mercurial.revlog.revlog(opener, *args, **kwargs)
138 return mercurial.revlog.revlog(opener, *args, **kwargs)
139
139
140
140
def identity(a):
    """Return *a* unchanged (no-op fallback for missing compat helpers)."""
    return a
143
143
144
144
145 try:
145 try:
146 from mercurial import pycompat
146 from mercurial import pycompat
147
147
148 getargspec = pycompat.getargspec # added to module after 4.5
148 getargspec = pycompat.getargspec # added to module after 4.5
149 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
149 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
150 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
150 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
151 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
151 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
152 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
152 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
153 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
153 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
154 if pycompat.ispy3:
154 if pycompat.ispy3:
155 _maxint = sys.maxsize # per py3 docs for replacing maxint
155 _maxint = sys.maxsize # per py3 docs for replacing maxint
156 else:
156 else:
157 _maxint = sys.maxint
157 _maxint = sys.maxint
158 except (NameError, ImportError, AttributeError):
158 except (NameError, ImportError, AttributeError):
159 import inspect
159 import inspect
160
160
161 getargspec = inspect.getargspec
161 getargspec = inspect.getargspec
162 _byteskwargs = identity
162 _byteskwargs = identity
163 _bytestr = str
163 _bytestr = str
164 fsencode = identity # no py3 support
164 fsencode = identity # no py3 support
165 _maxint = sys.maxint # no py3 support
165 _maxint = sys.maxint # no py3 support
166 _sysstr = lambda x: x # no py3 support
166 _sysstr = lambda x: x # no py3 support
167 _xrange = xrange
167 _xrange = xrange
168
168
169 try:
169 try:
170 # 4.7+
170 # 4.7+
171 queue = pycompat.queue.Queue
171 queue = pycompat.queue.Queue
172 except (NameError, AttributeError, ImportError):
172 except (NameError, AttributeError, ImportError):
173 # <4.7.
173 # <4.7.
174 try:
174 try:
175 queue = pycompat.queue
175 queue = pycompat.queue
176 except (NameError, AttributeError, ImportError):
176 except (NameError, AttributeError, ImportError):
177 import Queue as queue
177 import Queue as queue
178
178
179 try:
179 try:
180 from mercurial import logcmdutil
180 from mercurial import logcmdutil
181
181
182 makelogtemplater = logcmdutil.maketemplater
182 makelogtemplater = logcmdutil.maketemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 try:
184 try:
185 makelogtemplater = cmdutil.makelogtemplater
185 makelogtemplater = cmdutil.makelogtemplater
186 except (AttributeError, ImportError):
186 except (AttributeError, ImportError):
187 makelogtemplater = None
187 makelogtemplater = None
188
188
189 # for "historical portability":
189 # for "historical portability":
190 # define util.safehasattr forcibly, because util.safehasattr has been
190 # define util.safehasattr forcibly, because util.safehasattr has been
191 # available since 1.9.3 (or 94b200a11cf7)
191 # available since 1.9.3 (or 94b200a11cf7)
# sentinel distinguishing "attribute missing" from any real attribute value
_undefined = object()


def safehasattr(thing, attr):
    """Return True when *thing* has attribute *attr* (attr given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
197
197
198
198
199 setattr(util, 'safehasattr', safehasattr)
199 setattr(util, 'safehasattr', safehasattr)
200
200
201 # for "historical portability":
201 # for "historical portability":
202 # define util.timer forcibly, because util.timer has been available
202 # define util.timer forcibly, because util.timer has been available
203 # since ae5d60bb70c9
203 # since ae5d60bb70c9
204 if safehasattr(time, 'perf_counter'):
204 if safehasattr(time, 'perf_counter'):
205 util.timer = time.perf_counter
205 util.timer = time.perf_counter
206 elif os.name == b'nt':
206 elif os.name == b'nt':
207 util.timer = time.clock
207 util.timer = time.clock
208 else:
208 else:
209 util.timer = time.time
209 util.timer = time.time
210
210
211 # for "historical portability":
211 # for "historical portability":
212 # use locally defined empty option list, if formatteropts isn't
212 # use locally defined empty option list, if formatteropts isn't
213 # available, because commands.formatteropts has been available since
213 # available, because commands.formatteropts has been available since
214 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
214 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
215 # available since 2.2 (or ae5f92e154d3)
215 # available since 2.2 (or ae5f92e154d3)
216 formatteropts = getattr(
216 formatteropts = getattr(
217 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
217 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
218 )
218 )
219
219
220 # for "historical portability":
220 # for "historical portability":
221 # use locally defined option list, if debugrevlogopts isn't available,
221 # use locally defined option list, if debugrevlogopts isn't available,
222 # because commands.debugrevlogopts has been available since 3.7 (or
222 # because commands.debugrevlogopts has been available since 3.7 (or
223 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
223 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
224 # since 1.9 (or a79fea6b3e77).
224 # since 1.9 (or a79fea6b3e77).
225 revlogopts = getattr(
225 revlogopts = getattr(
226 cmdutil,
226 cmdutil,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 getattr(
228 getattr(
229 commands,
229 commands,
230 "debugrevlogopts",
230 "debugrevlogopts",
231 [
231 [
232 (b'c', b'changelog', False, b'open changelog'),
232 (b'c', b'changelog', False, b'open changelog'),
233 (b'm', b'manifest', False, b'open manifest'),
233 (b'm', b'manifest', False, b'open manifest'),
234 (b'', b'dir', False, b'open directory manifest'),
234 (b'', b'dir', False, b'open directory manifest'),
235 ],
235 ],
236 ),
236 ),
237 )
237 )
238
238
239 cmdtable = {}
239 cmdtable = {}
240
240
241
241
242 # for "historical portability":
242 # for "historical portability":
243 # define parsealiases locally, because cmdutil.parsealiases has been
243 # define parsealiases locally, because cmdutil.parsealiases has been
244 # available since 1.5 (or 6252852b4332)
244 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec such as b"name|alias1|alias2" into its names.

    for "historical portability": defined locally because
    cmdutil.parsealiases has only been available since 1.5
    (or 6252852b4332).
    """
    return cmd.split(b"|")
247
247
248
248
249 if safehasattr(registrar, 'command'):
249 if safehasattr(registrar, 'command'):
250 command = registrar.command(cmdtable)
250 command = registrar.command(cmdtable)
251 elif safehasattr(cmdutil, 'command'):
251 elif safehasattr(cmdutil, 'command'):
252 command = cmdutil.command(cmdtable)
252 command = cmdutil.command(cmdtable)
253 if 'norepo' not in getargspec(command).args:
253 if 'norepo' not in getargspec(command).args:
254 # for "historical portability":
254 # for "historical portability":
255 # wrap original cmdutil.command, because "norepo" option has
255 # wrap original cmdutil.command, because "norepo" option has
256 # been available since 3.1 (or 75a96326cecb)
256 # been available since 3.1 (or 75a96326cecb)
257 _command = command
257 _command = command
258
258
259 def command(name, options=(), synopsis=None, norepo=False):
259 def command(name, options=(), synopsis=None, norepo=False):
260 if norepo:
260 if norepo:
261 commands.norepo += b' %s' % b' '.join(parsealiases(name))
261 commands.norepo += b' %s' % b' '.join(parsealiases(name))
262 return _command(name, list(options), synopsis)
262 return _command(name, list(options), synopsis)
263
263
264
264
265 else:
265 else:
266 # for "historical portability":
266 # for "historical portability":
267 # define "@command" annotation locally, because cmdutil.command
267 # define "@command" annotation locally, because cmdutil.command
268 # has been available since 1.9 (or 2daa5179e73f)
268 # has been available since 1.9 (or 2daa5179e73f)
269 def command(name, options=(), synopsis=None, norepo=False):
269 def command(name, options=(), synopsis=None, norepo=False):
270 def decorator(func):
270 def decorator(func):
271 if synopsis:
271 if synopsis:
272 cmdtable[name] = func, list(options), synopsis
272 cmdtable[name] = func, list(options), synopsis
273 else:
273 else:
274 cmdtable[name] = func, list(options)
274 cmdtable[name] = func, list(options)
275 if norepo:
275 if norepo:
276 commands.norepo += b' %s' % b' '.join(parsealiases(name))
276 commands.norepo += b' %s' % b' '.join(parsealiases(name))
277 return func
277 return func
278
278
279 return decorator
279 return decorator
280
280
281
281
282 try:
282 try:
283 import mercurial.registrar
283 import mercurial.registrar
284 import mercurial.configitems
284 import mercurial.configitems
285
285
286 configtable = {}
286 configtable = {}
287 configitem = mercurial.registrar.configitem(configtable)
287 configitem = mercurial.registrar.configitem(configtable)
288 configitem(
288 configitem(
289 b'perf',
289 b'perf',
290 b'presleep',
290 b'presleep',
291 default=mercurial.configitems.dynamicdefault,
291 default=mercurial.configitems.dynamicdefault,
292 experimental=True,
292 experimental=True,
293 )
293 )
294 configitem(
294 configitem(
295 b'perf',
295 b'perf',
296 b'stub',
296 b'stub',
297 default=mercurial.configitems.dynamicdefault,
297 default=mercurial.configitems.dynamicdefault,
298 experimental=True,
298 experimental=True,
299 )
299 )
300 configitem(
300 configitem(
301 b'perf',
301 b'perf',
302 b'parentscount',
302 b'parentscount',
303 default=mercurial.configitems.dynamicdefault,
303 default=mercurial.configitems.dynamicdefault,
304 experimental=True,
304 experimental=True,
305 )
305 )
306 configitem(
306 configitem(
307 b'perf',
307 b'perf',
308 b'all-timing',
308 b'all-timing',
309 default=mercurial.configitems.dynamicdefault,
309 default=mercurial.configitems.dynamicdefault,
310 experimental=True,
310 experimental=True,
311 )
311 )
312 configitem(
312 configitem(
313 b'perf',
313 b'perf',
314 b'pre-run',
314 b'pre-run',
315 default=mercurial.configitems.dynamicdefault,
315 default=mercurial.configitems.dynamicdefault,
316 )
316 )
317 configitem(
317 configitem(
318 b'perf',
318 b'perf',
319 b'profile-benchmark',
319 b'profile-benchmark',
320 default=mercurial.configitems.dynamicdefault,
320 default=mercurial.configitems.dynamicdefault,
321 )
321 )
322 configitem(
322 configitem(
323 b'perf',
323 b'perf',
324 b'profiled-runs',
324 b'profiled-runs',
325 default=mercurial.configitems.dynamicdefault,
325 default=mercurial.configitems.dynamicdefault,
326 )
326 )
327 configitem(
327 configitem(
328 b'perf',
328 b'perf',
329 b'run-limits',
329 b'run-limits',
330 default=mercurial.configitems.dynamicdefault,
330 default=mercurial.configitems.dynamicdefault,
331 experimental=True,
331 experimental=True,
332 )
332 )
333 except (ImportError, AttributeError):
333 except (ImportError, AttributeError):
334 pass
334 pass
335 except TypeError:
335 except TypeError:
336 # compatibility fix for a11fd395e83f
336 # compatibility fix for a11fd395e83f
337 # hg version: 5.2
337 # hg version: 5.2
338 configitem(
338 configitem(
339 b'perf',
339 b'perf',
340 b'presleep',
340 b'presleep',
341 default=mercurial.configitems.dynamicdefault,
341 default=mercurial.configitems.dynamicdefault,
342 )
342 )
343 configitem(
343 configitem(
344 b'perf',
344 b'perf',
345 b'stub',
345 b'stub',
346 default=mercurial.configitems.dynamicdefault,
346 default=mercurial.configitems.dynamicdefault,
347 )
347 )
348 configitem(
348 configitem(
349 b'perf',
349 b'perf',
350 b'parentscount',
350 b'parentscount',
351 default=mercurial.configitems.dynamicdefault,
351 default=mercurial.configitems.dynamicdefault,
352 )
352 )
353 configitem(
353 configitem(
354 b'perf',
354 b'perf',
355 b'all-timing',
355 b'all-timing',
356 default=mercurial.configitems.dynamicdefault,
356 default=mercurial.configitems.dynamicdefault,
357 )
357 )
358 configitem(
358 configitem(
359 b'perf',
359 b'perf',
360 b'pre-run',
360 b'pre-run',
361 default=mercurial.configitems.dynamicdefault,
361 default=mercurial.configitems.dynamicdefault,
362 )
362 )
363 configitem(
363 configitem(
364 b'perf',
364 b'perf',
365 b'profiled-runs',
365 b'profiled-runs',
366 default=mercurial.configitems.dynamicdefault,
366 default=mercurial.configitems.dynamicdefault,
367 )
367 )
368 configitem(
368 configitem(
369 b'perf',
369 b'perf',
370 b'run-limits',
370 b'run-limits',
371 default=mercurial.configitems.dynamicdefault,
371 default=mercurial.configitems.dynamicdefault,
372 )
372 )
373
373
374
374
def getlen(ui):
    """Return a length function for benchmark sizing.

    Under the experimental perf.stub config every collection is reported
    as having one element, which keeps test output deterministic;
    otherwise the builtin ``len`` is returned.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
379
379
380
380
class noop:
    """Dummy context manager: enters and exits without doing anything."""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


# shared do-nothing context instance, used e.g. when no profiler is active
NOOPCTX = noop()
392
392
393
393
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all output to stderr unless the buffer api is in use, so the
    # timing report does not pollute captured command output
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<runcount>'; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    profiled_runs = set()
    # profiling support is optional (module may be absent on old Mercurial)
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = lambda: profiling.profile(ui)
            for run in ui.configlist(b"perf", b"profiled-runs", [0]):
                profiled_runs.add(int(run))

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
        profiled_runs=profiled_runs,
    )
    return t, fm
520
520
521
521
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after *setup*, when given), without timing.

    Used in place of ``_timer`` when perf.stub is set; *fm* and *title*
    are accepted only for signature compatibility and ignored.
    """
    if setup is not None:
        setup()
    func()
526
526
527
527
@contextlib.contextmanager
def timeone():
    """Time one operation.

    Yields a list that, once the with-block exits, receives a single
    (wallclock, user-cpu, system-cpu) delta tuple.
    """
    measurement = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurement
    wall_after = util.timer()
    os_after = os.times()
    measurement.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
538
538
539
539
# default stop conditions as (elapsed-seconds, minimal-run-count) pairs:
# a benchmark stops once any pair is satisfied (checked in order)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
545
545
546
546
@contextlib.contextmanager
def noop_context():
    """Do-nothing context manager; the default ``context`` for ``_timer``."""
    yield
550
550
551
551
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
    profiled_runs=(0,),
):
    """Benchmark *func* repeatedly and report timings through formatter *fm*.

    *setup* (when given) runs before every call and *context* wraps every
    call.  *prerun* warm-up iterations are executed first and discarded.
    *limits* is a sequence of (elapsed, mincount) stop conditions; the loop
    ends once any condition is met.  Iterations whose index appears in
    *profiled_runs* execute under the context returned by *profiler*.
    """
    gc.collect()
    results = []
    count = 0
    if profiler is None:
        profiler = lambda: NOOPCTX
    # warm-up runs: neither timed, profiled, nor recorded
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    # start the total-elapsed clock only after warming up, so the stop
    # conditions in `limits` apply to measured iterations only
    begin = util.timer()
    keepgoing = True
    while keepgoing:
        if count in profiled_runs:
            prof = profiler()
        else:
            prof = NOOPCTX
        if setup is not None:
            setup()
        with context():
            # collect garbage outside the timed region to reduce noise
            gc.collect()
            with prof:
                with timeone() as item:
                    r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
599
599
600
600
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  The best (fastest) run is always reported; with *displayall*
    the max, element-wise average and median runs are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # every role except the best run gets a "role." field-name prefix
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        # element-wise average across all runs
        display(b'avg', tuple([sum(x) / count for x in zip(*timings)]))
        display(b'median', timings[len(timings) // 2])
633
633
634
634
635 # utilities for historical portability
635 # utilities for historical portability
636
636
637
637
def getint(ui, section, name, default):
    """Read config item *section*.*name* as an integer, or *default*.

    For "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw string value ourselves.

    Raises error.ConfigError when the value exists but is not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
650
650
651
651
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a small helper to set and restore attribute *name* on *obj*.

    This function aborts when *obj* lacks the attribute, so that a future
    removal of an attribute we rely on is detected instead of silently
    skewing measurements.

    The returned object exposes set(newvalue) to assign a new value and
    restore() to put the original value back.

    With *ignoremissing*, a missing attribute returns None instead of
    aborting, which is useful for attributes that only exist in some
    Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
688
688
689
689
690 # utilities to examine each internal API changes
690 # utilities to examine each internal API changes
691
691
692
692
def getbranchmapsubsettable():
    """Locate the 'subsettable' mapping across Mercurial versions.

    For "historical portability" it has lived in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but neither carries the
    # subsettable attribute)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
711
711
712
712
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    For "historical portability": repo.svfs has only been available since
    2.3 (or 7034365089bf); older versions exposed repo.sopener instead.
    """
    svfs = getattr(repo, 'svfs', None)
    if not svfs:
        return getattr(repo, 'sopener')
    return svfs
722
722
723
723
def getvfs(repo):
    """Return appropriate object to access files under .hg

    For "historical portability": repo.vfs has only been available since
    2.3 (or 7034365089bf); older versions exposed repo.opener instead.
    """
    vfs = getattr(repo, 'vfs', None)
    if not vfs:
        return getattr(repo, 'opener')
    return vfs
733
733
734
734
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    The probes below run from newest to oldest API, so the first matching
    attribute decides which clearing strategy is used.
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
763
763
764
764
765 # utilities to clear cache
765 # utilities to clear cache
766
766
767
767
def clearfilecache(obj, attrname):
    """Drop the cached value of a @filecache property from *obj*.

    Operates on the unfiltered repository when *obj* supports it, since
    that is where the file-backed caches actually live.  Missing entries
    are ignored.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
775
775
776
776
def clearchangelog(repo):
    """Invalidate the in-memory changelog cache of *repo*.

    Filtered repos keep their own cache key/value pair, so reset those
    first, then drop the filecache-ed 'changelog' on the unfiltered repo.
    """
    if repo is not repo.unfiltered():
        # bypass any property machinery with object.__setattr__
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
782
782
783
783
784 # perf commands
784 # perf commands
785
785
786
786
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk over the files matched by *pats*
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        entries = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        # return the count so it shows up as the benchmark's "result"
        return len(list(entries))

    timer(d)
    fm.end()
800
800
801
801
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file *f* at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
809
809
810
810
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # --dirstate: time the low-level dirstate.status call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy work is actually performed
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer API: wrap timing in running_status, then invalidate so
            # later code does not reuse the benchmark's dirstate state
            with dirstate.running_status(repo):
                timer(status_dirstate)
            dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
852
852
853
853
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Capture the quiet flag BEFORE entering the try block: if the
    # assignment lived inside it and an earlier statement raised, the
    # finally clause would fail with an unbound 'oldquiet' and mask the
    # original error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # the uipathfn parameter was added to scmutil.addremove later;
        # probe the signature for "historical portability"
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
871
871
872
872
def clearcaches(cl):
    """Clear the lookup caches of changelog *cl*.

    Behave somewhat consistently across internal API changes: prefer the
    modern clearcaches() method, falling back to resetting the hand-rolled
    node cache of older revlogs.
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
883
883
884
884
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # drop lookup caches before each run so every run starts cold
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=setup)
    fm.end()
900
900
901
901
def _default_clear_on_disk_tags_cache(repo):
    """Remove the on-disk tags cache file for *repo*.

    Fallback used when the tags module has no clear_cache_on_disk helper.
    """
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
906
906
907
907
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Remove the on-disk tags fnodes cache file for *repo*.

    Fallback used when the tags module has no working clear_cache_fnodes.
    """
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
912
912
913
913
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache

    Fallback used when the tags module has no forget_fnodes helper: each
    entry for a revision in *revs* is overwritten with the invalid marker.
    """
    from mercurial import tags

    # an all-0xff rev and node mark a cache record as unknown/invalid
    invalid_rev = b'\xff' * 4
    invalid_node = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for rev in revs:
        offset = rev * tags._fnodesrecsize
        cache._writeentry(offset, invalid_rev, invalid_node)
    cache.write()
925
925
926
926
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer helpers provided by the tags module itself, falling back to
    # the local _default_* implementations for older Mercurial versions
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        # simulate an update: warm a tags cache for the repo *minus* the
        # last N revisions, and copy it over the real cache before each run
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # setup run before each timed run, restoring the requested
        # (possibly degraded) cache state
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1045
1045
1046
1046
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark iterating over all ancestors of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
1059
1059
1060
1060
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET's revs against the ancestors
    # of all heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(d)
    fm.end()
1075
1075
1076
1076
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional argument: REV (revlog picked via -c/-m);
    # two positional arguments: FILE REV
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo a new revision would carry, from the stored
    # revision's own data (cachedelta deliberately absent)
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1140
1140
1141
1141
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # for "historical portability": resolve PATH with whichever API this
    # Mercurial version provides, newest first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # open a fresh peer before each run so connection state does not
        # leak between measurements
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1168
1168
1169
1169
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # drop the cached bookmarks (and optionally the revlogs) so each
        # run re-parses from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=setup)
    fm.end()
1194
1194
1195
1195
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # parsebundlespec moved between modules over time; try new location first
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # BUGFIX: message previously read "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        # derive changegroup version from the bundle format version
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we only benchmark bundle generation, not I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1299
1299
1300
1300
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # --- benchmark factories -------------------------------------------

    def makebench(consumer):
        # open + parse the bundle, then hand it to `consumer`
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                consumer(bundle)

        return run

    def makereadnbytes(chunksize):
        # drain the bundle stream `chunksize` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(chunksize):
                    pass

        return run

    def makestdioread(chunksize):
        # raw file read, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(chunksize):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(chunksize):
        # drain every part of a bundle2, `chunksize` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(chunksize):
                        pass

        return run

    # raw-read baselines apply to every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1425
1425
1426
1426
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def generate():
        # exhaust the chunk generator; we only care about the cost of
        # producing the chunks, not about their content
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(generate)

    fm.end()
1462
1462
1463
1463
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call, dropping the dirs cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate itself so only hasdir()'s cache rebuild is timed
    b'a' in dirstate

    def check_hasdir():
        dirstate.hasdir(b'a')
        # drop the dirs cache so the next run rebuilds it; some dirstate
        # implementations do not expose the attribute
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(check_hasdir)
    fm.end()
1480
1480
1481
1481
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate once before anything is measured
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over all tracked files
        setup = None
        dirstate = repo.dirstate

        def bench():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # time many membership checks, half hits and half guaranteed misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def bench():
            for f in allfiles:
                f in dirstate

    else:
        # time a cold load: invalidate before each run so the dirstate is
        # re-read from disk up to the point a lookup can be answered
        def setup():
            repo.dirstate.invalidate()

        def bench():
            b"a" in repo.dirstate

    timer(bench, setup=setup)
    fm.end()
1544
1544
1545
1545
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate itself so only the dirs-cache rebuild is measured
    repo.dirstate.hasdir(b"a")

    def drop_dirs_cache():
        # some dirstate implementations don't expose _dirs; ignore then
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def bench():
        repo.dirstate.hasdir(b"a")

    timer(bench, setup=drop_dirs_cache)
    fm.end()
1564
1564
1565
1565
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the filefoldmap, which setup() drops each run
    dirstate._map.filefoldmap.get(b'a')

    def drop_foldmap():
        del dirstate._map.filefoldmap

    def bench():
        dirstate._map.filefoldmap.get(b'a')

    timer(bench, setup=drop_foldmap)
    fm.end()
1585
1585
1586
1586
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the caches that setup() drops each run
    dirstate._map.dirfoldmap.get(b'a')

    def drop_caches():
        del dirstate._map.dirfoldmap
        # _dirs may not exist on every dirstate implementation
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def bench():
        dirstate._map.dirfoldmap.get(b'a')

    timer(bench, setup=drop_caches)
    fm.end()
1610
1610
1611
1611
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so the write, not the load, is measured
    b"a" in ds

    def mark_dirty():
        # force a real write on every run
        ds._dirty = True

    def write_dirstate():
        ds.write(repo.currenttransaction())

    # writing requires the working-copy lock
    with repo.wlock():
        timer(write_dirstate, setup=mark_dirty)
    fm.end()
1629
1629
1630
1630
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        wctx = repo[scmutil.revsingle(repo, opts[b'from'])]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        ancestor = repo[scmutil.revsingle(repo, opts[b'base'])]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1652
1652
1653
1653
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def calculate():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(calculate)
    fm.end()
1685
1685
1686
1686
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def trace_copies():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(trace_copies)
    fm.end()
1709
1709
1710
1710
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def trace_paths():
        copies.pathcopies(ctx1, ctx2)

    timer(trace_paths)
    fm.end()
1724
1724
1725
1725
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ]
    + formatteropts,
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def compute_phases():
        phases = _phases
        if full:
            # also drop the file cache so the on-disk read is timed too
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.phase(repo, tip_rev)

    timer(compute_phases)
    fm.end()
1752
1752
1753
1753
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer hg exposes a path object with push variants; older hg only has
    # pushloc/loc attributes
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    # the peer is no longer needed; drop it before timing starts
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1816
1816
1817
1817
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full 40-char hex string is a manifest node id
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older hg exposes the revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1861
1861
1862
1862
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark the time to read one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1875
1875
1876
1876
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the in-memory dirstate state and the cached ignore
        # matcher so each run measures a cold load
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # attribute access triggers the (re)computation being measured
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1893
1893
1894
1894
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: keys were converted to bytes by _byteskwargs, so the str key
        # 'rev' would raise KeyError instead of the intended Abort
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1957
1957
1958
1958
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
2029
2029
2030
2030
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time to start hg and run a trivial command (`version`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # neutralize HGRCPATH so config loading does not skew the run
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
2047
2047
2048
2048
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    ``version`` is one of ``b'v1'``, ``b'v2'``, ``b'v3-exp'`` or
    ``b'latest'``; aborts if the requested version is unknown or not
    provided by this Mercurial."""
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            entries, bytes, data = generatev1(repo, None, None, True)
            return data

        # fix: register the wrapper (uniform `generate(repo)` signature),
        # not the raw generatev1 function
        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            return generatev3(repo, None, None, True)

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        # NOTE: "unkown" typo kept as-is; the message is user-visible output
        msg = b"unkown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2095
2095
2096
2096
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()
2131
2131
2132
2132
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to us ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2164
2164
2165
2165
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    run_variables = [None, None]

    # we create the new repository next to the other one for two reasons:
    # - this way we use the same file system, which are relevant for benchmark
    # - if /tmp/ is small, the operation could overfills it.
    source_repo_dir = os.path.dirname(repo.root)

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory(
                prefix=b'hg-perf-stream-consume-',
                dir=source_repo_dir,
            ) as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]

        # we actually wants to copy all config to ensure the repo config is
        # taken in account during the benchmark
        new_ui = repo.ui.__class__(repo.ui)
        # only pass ui when no srcrepo
        localrepo.createrepository(
            new_ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(new_ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2255
2255
2256
2256
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
2282
2282
2283
2283
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark the time to compute the files touched by changeset `x`"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
2295
2295
2296
2296
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files field of changelog entry `x`"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # index 3 of a changelog entry is the list of touched files
        len(cl.read(x)[3])

    timer(d)
    fm.end()
2309
2309
2310
2310
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier through repo.lookup"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
2317
2317
2318
2318
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run replays the same pseudo-random edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2356
2356
2357
2357
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs into a revision range"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup so the timed body stays minimal
    resolve = scmutil.revrange

    def run():
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
2365
2365
2366
2366
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a full node id to its revision number

    The changelog is re-opened directly (bypassing the repository's cached
    changelog) and its caches are cleared between runs, so each timed run
    exercises an uncached node -> rev lookup.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # The revlog constructor signature changed over time: newer Mercurial
    # takes a `radix`, older versions take an `indexfile` path instead.
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        clearcaches(cl)  # force the next run to do the lookup from scratch

    timer(d)
    fm.end()
2387
2387
2388
2388
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the `log` command, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow command output so printing does not dominate the timing
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
2406
2406
2407
2407
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        lastrev = len(repo) - 1
        for rev in repo.changelog.revs(start=lastrev, stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(walkbackwards)
    fm.end()
2424
2424
2425
2425
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # Render into the bit bucket so terminal I/O does not dominate the
    # measurement.  Keep a handle on the file object so it can be closed
    # afterwards; assigning it straight to `fout` used to leak the
    # descriptor for the life of the process.
    devnull = open(os.devnull, 'wb')
    try:
        nullui.fout = devnull
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            # render every requested revision through the template engine
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        devnull.close()
2468
2468
2469
2469
2470 def _displaystats(ui, opts, entries, data):
2470 def _displaystats(ui, opts, entries, data):
2471 # use a second formatter because the data are quite different, not sure
2471 # use a second formatter because the data are quite different, not sure
2472 # how it flies with the templater.
2472 # how it flies with the templater.
2473 fm = ui.formatter(b'perf-stats', opts)
2473 fm = ui.formatter(b'perf-stats', opts)
2474 for key, title in entries:
2474 for key, title in entries:
2475 values = data[key]
2475 values = data[key]
2476 nbvalues = len(data)
2476 nbvalues = len(data)
2477 values.sort()
2477 values.sort()
2478 stats = {
2478 stats = {
2479 'key': key,
2479 'key': key,
2480 'title': title,
2480 'title': title,
2481 'nbitems': len(values),
2481 'nbitems': len(values),
2482 'min': values[0][0],
2482 'min': values[0][0],
2483 '10%': values[(nbvalues * 10) // 100][0],
2483 '10%': values[(nbvalues * 10) // 100][0],
2484 '25%': values[(nbvalues * 25) // 100][0],
2484 '25%': values[(nbvalues * 25) // 100][0],
2485 '50%': values[(nbvalues * 50) // 100][0],
2485 '50%': values[(nbvalues * 50) // 100][0],
2486 '75%': values[(nbvalues * 75) // 100][0],
2486 '75%': values[(nbvalues * 75) // 100][0],
2487 '80%': values[(nbvalues * 80) // 100][0],
2487 '80%': values[(nbvalues * 80) // 100][0],
2488 '85%': values[(nbvalues * 85) // 100][0],
2488 '85%': values[(nbvalues * 85) // 100][0],
2489 '90%': values[(nbvalues * 90) // 100][0],
2489 '90%': values[(nbvalues * 90) // 100][0],
2490 '95%': values[(nbvalues * 95) // 100][0],
2490 '95%': values[(nbvalues * 95) // 100][0],
2491 '99%': values[(nbvalues * 99) // 100][0],
2491 '99%': values[(nbvalues * 99) // 100][0],
2492 'max': values[-1][0],
2492 'max': values[-1][0],
2493 }
2493 }
2494 fm.startitem()
2494 fm.startitem()
2495 fm.data(**stats)
2495 fm.data(**stats)
2496 # make node pretty for the human output
2496 # make node pretty for the human output
2497 fm.plain('### %s (%d items)\n' % (title, len(values)))
2497 fm.plain('### %s (%d items)\n' % (title, len(values)))
2498 lines = [
2498 lines = [
2499 'min',
2499 'min',
2500 '10%',
2500 '10%',
2501 '25%',
2501 '25%',
2502 '50%',
2502 '50%',
2503 '75%',
2503 '75%',
2504 '80%',
2504 '80%',
2505 '85%',
2505 '85%',
2506 '90%',
2506 '90%',
2507 '95%',
2507 '95%',
2508 '99%',
2508 '99%',
2509 'max',
2509 'max',
2510 ]
2510 ]
2511 for l in lines:
2511 for l in lines:
2512 fm.plain('%s: %s\n' % (l, stats[l]))
2512 fm.plain('%s: %s\n' % (l, stats[l]))
2513 fm.end()
2513 fm.end()
2514
2514
2515
2515
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column layout for the human-readable table: each entry pairs a
    # header name with the %-format used to render the matching data key
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns have no data to show
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions are relevant: one (base, p1, p2) triplet is
    # reported per common-ancestor head of each merge in the set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                # these stats keys only exist when timing data was gathered
                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2697
2697
2698
2698
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing the table gains two extra columns (renames, time)
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions are interesting: copy tracing from each
    # common-ancestor head to each parent is what gets measured
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace between this pair; skip it
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2837
2837
2838
2838
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(makeauditor)
    fm.end()
2845
2845
2846
2846
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2858
2858
2859
2859
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction

    The store lock is taken and a real transaction is opened (with the
    current fncache backed up) before timing starts, so only the write
    itself is measured.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        # force the dirty flag each run so write() actually rewrites the file
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
2878
2878
2879
2879
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently held in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        # hoist lookups out of the hot loop
        encode = store.encode
        for path in store.fncache.entries:
            encode(path)

    timer(encodeall)
    fm.end()
2893
2893
2894
2894
def _bdiffworker(q, blocks, xdiff, ready, done):
    """thread worker for perfbdiff: diff text pairs pulled from queue `q`

    A ``None`` item on the queue acts as a wake-up sentinel: the worker
    stops draining, re-checks the ``done`` event and, once it is set,
    parks on the ``ready`` condition so all workers can be released
    together.  `xdiff`/`blocks` select which diff routine is exercised.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2910
2910
2911
2911
def _manifestrevision(repo, mnode):
    """return the raw manifest text for node `mnode`, across hg versions"""
    manifestlog = repo.manifestlog
    # modern Mercurial exposes getstorage(); older versions only have the
    # private _revlog attribute
    if util.safehasattr(manifestlog, b'getstorage'):
        return manifestlog.getstorage(b'').revision(mnode)
    return manifestlog._revlog.revision(mnode)
2921
2921
2922
2922
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff only selects the algorithm used by the block-level API
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # for -c/-m the positional FILE argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Gather all (old, new) text pairs up front so only diffing is timed.
    # NOTE(review): the upper bound min(startrev + count, len(r) - 1) means
    # the revlog's final revision is never diffed — confirm intended.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Seed one None sentinel per worker and join the queue so every
        # worker thread has started and drained its sentinel before the
        # timed section begins (presumably to exclude thread startup cost
        # from the measurement — see _bdiffworker).
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed one batch of pairs, terminated by one sentinel per worker,
            # and wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    # threaded teardown: signal completion, unblock workers waiting in
    # q.get() with sentinels, then wake any worker parked on `ready`
    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3038
3038
@command(
    b'perf::unbundle',
    [
        (b'', b'as-push', None, b'pretend the bundle comes from a push'),
    ]
    + formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The --as-push option make the unbundle operation appears like it comes from
    a client push. It change some aspect of the processing and associated
    performance profile.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    unbundle_source = b'perf::unbundle'
    if opts[b'as_push']:
        unbundle_source = b'push'

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [bundle-reader, transaction] shared between
            # setup() and apply(); a list so both closures can rebind it
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left by the previous run,
                        # rewind the bundle file and open a fresh transaction
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=unbundle_source,
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this was `repo.ui.quiet == orig_quiet`, a no-op
                # comparison that left the ui silenced after the benchmark
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        # restore the inlining threshold if we overrode it above
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3131
3131
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional FILE argument actually holds the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (old, new) texts up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    stop = min(startrev + count, len(r) - 1)
    for rev in range(startrev, stop):
        if not opts[b'alldata']:
            base = r.deltaparent(rev)
            textpairs.append((r.revision(base), r.revision(rev)))
            continue
        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            textpairs.append(
                (_manifestrevision(repo, pctx.manifestnode()), mtext)
            )

        # Load filelog revisions by iterating manifest delta.
        curman = ctx.manifest()
        parman = ctx.p1().manifest()
        for filename, change in parman.diff(curman).items():
            flog = repo.file(filename)
            oldtext = flog.revision(change[0][0] or -1)
            newtext = flog.revision(change[1][0] or -1)
            textpairs.append((oldtext, newtext))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3211
3211
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map single-letter diff flags to their long option names
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark a plain diff plus each whitespace-handling combination
    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flagnames[c]: b'1' for c in combo}

        def d():
            # swallow the diff output so printing is not measured
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        label = combo.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
3236
3236
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    # raw index bytes; every benchmark below re-parses this same buffer
    data = opener.read(indexfile)

    # revlog v1 header: 16-bit flags, 16-bit version; bit 16 is the
    # inline-data flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # older hg: the parser lives on the revlogio class
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time building a revlog object from scratch
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # time raw index file I/O only
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # time parsing the in-memory index buffer
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # node -> rev lookup; newer indexes expose rev(), older ones a
        # nodemap mapping
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            # missing nodes are expected for the "missing node" benchmark
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    # each benchmark gets its own timer/formatter pair
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3383
3383
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative start counts back from the end, like list indexing
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop cached chunks so every pass pays the full read cost
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last, step = rllen - 1, startrev - 1, -1 * step
        else:
            first, last = startrev, rllen

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3433
3433
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # message typo fixed ("invalide" -> "invalid")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        # each pass rewrites the same revision range in a fresh temporary
        # revlog; per-revision timings are collected for later aggregation
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: [(rev, [timing-of-run-0, timing-of-run-1, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the 50th percentile was previously computed with a 70
        # multiplier, reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3575
3575
3576
3576
3577 class _faketr:
3577 class _faketr:
3578 def add(s, x, y, z=None):
3578 def add(s, x, y, z=None):
3579 return None
3579 return None
3580
3580
3581
3581
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of ``orig`` to a throwaway revlog.

    Returns a list of ``(rev, timing)`` pairs, one entry per rewritten
    revision. ``source`` selects how the revision data is fed to the write
    path (see ``perfrevlogwrite``).
    """
    per_rev_timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # the lazy-delta-base knob moved into delta_config on newer revlogs
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            updateprogress = progress.update
            completeprogress = progress.complete
        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # measure cold-cache writes unless the caller opted out
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            per_rev_timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return per_rev_timings
3634
3634
3635
3635
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair used to re-add ``rev`` of ``orig``.

    Depending on ``source``, the revision content is provided either as a
    full text or as a cached delta against some base revision; the result
    is meant to be splatted into ``revlog.addrawrevision``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent delta is the shortest
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent = p2
                diff = p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # reuse the delta base already chosen by the存 existing storage
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3676
3676
3677
3677
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary, writable copy of ``orig``.

    The copy lives in a freshly created temporary directory and is
    truncated so that every revision >= ``truncaterev`` is removed; the
    benchmark can then re-add those revisions. The temporary directory is
    deleted on exit. Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward upperboundcomp when the source revlog carries it
    # (not present on older Mercurial versions)
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # same old/new attribute dance for the data file
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    # fixed names used inside the temporary directory
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # the index is fixed-size records: cut after truncaterev entries
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # drop everything from truncaterev's data offset onwards
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern constructor takes a radix
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older constructor takes explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3738
3738
3739
3739
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        # yields an open file object on old revlog APIs, None on new ones
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # fixed: look for the modern '_datafile' attribute first, then
            # fall back to the legacy 'datafile' (previously the same
            # attribute name was queried twice)
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        # read each chunk individually
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        # read each chunk individually, reusing one file descriptor
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        # read the whole revision span in one call
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # batch read, reusing one file descriptor
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    # holder for the decompressed chunks produced by dochunkbatch, consumed
    # later by the compression benchmarks
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3915
3915
3916
3916
3917 @command(
3917 @command(
3918 b'perf::revlogrevision|perfrevlogrevision',
3918 b'perf::revlogrevision|perfrevlogrevision',
3919 revlogopts
3919 revlogopts
3920 + formatteropts
3920 + formatteropts
3921 + [(b'', b'cache', False, b'use caches instead of clearing')],
3921 + [(b'', b'cache', False, b'use caches instead of clearing')],
3922 b'-c|-m|FILE REV',
3922 b'-c|-m|FILE REV',
3923 )
3923 )
3924 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3924 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3925 """Benchmark obtaining a revlog revision.
3925 """Benchmark obtaining a revlog revision.
3926
3926
3927 Obtaining a revlog revision consists of roughly the following steps:
3927 Obtaining a revlog revision consists of roughly the following steps:
3928
3928
3929 1. Compute the delta chain
3929 1. Compute the delta chain
3930 2. Slice the delta chain if applicable
3930 2. Slice the delta chain if applicable
3931 3. Obtain the raw chunks for that delta chain
3931 3. Obtain the raw chunks for that delta chain
3932 4. Decompress each raw chunk
3932 4. Decompress each raw chunk
3933 5. Apply binary patches to obtain fulltext
3933 5. Apply binary patches to obtain fulltext
3934 6. Verify hash of fulltext
3934 6. Verify hash of fulltext
3935
3935
3936 This command measures the time spent in each of these phases.
3936 This command measures the time spent in each of these phases.
3937 """
3937 """
3938 opts = _byteskwargs(opts)
3938 opts = _byteskwargs(opts)
3939
3939
3940 if opts.get(b'changelog') or opts.get(b'manifest'):
3940 if opts.get(b'changelog') or opts.get(b'manifest'):
3941 file_, rev = None, file_
3941 file_, rev = None, file_
3942 elif rev is None:
3942 elif rev is None:
3943 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3943 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3944
3944
3945 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3945 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3946
3946
3947 # _chunkraw was renamed to _getsegmentforrevs.
3947 # _chunkraw was renamed to _getsegmentforrevs.
3948 try:
3948 try:
3949 segmentforrevs = r._inner.get_segment_for_revs
3949 segmentforrevs = r._inner.get_segment_for_revs
3950 except AttributeError:
3950 except AttributeError:
3951 try:
3951 try:
3952 segmentforrevs = r._getsegmentforrevs
3952 segmentforrevs = r._getsegmentforrevs
3953 except AttributeError:
3953 except AttributeError:
3954 segmentforrevs = r._chunkraw
3954 segmentforrevs = r._chunkraw
3955
3955
3956 node = r.lookup(rev)
3956 node = r.lookup(rev)
3957 rev = r.rev(node)
3957 rev = r.rev(node)
3958
3958
3959 if getattr(r, 'reading', None) is not None:
3959 if getattr(r, 'reading', None) is not None:
3960
3960
3961 @contextlib.contextmanager
3961 @contextlib.contextmanager
3962 def lazy_reading(r):
3962 def lazy_reading(r):
3963 with r.reading():
3963 with r.reading():
3964 yield
3964 yield
3965
3965
3966 else:
3966 else:
3967
3967
3968 @contextlib.contextmanager
3968 @contextlib.contextmanager
3969 def lazy_reading(r):
3969 def lazy_reading(r):
3970 yield
3970 yield
3971
3971
3972 def getrawchunks(data, chain):
3972 def getrawchunks(data, chain):
3973 start = r.start
3973 start = r.start
3974 length = r.length
3974 length = r.length
3975 inline = r._inline
3975 inline = r._inline
3976 try:
3976 try:
3977 iosize = r.index.entry_size
3977 iosize = r.index.entry_size
3978 except AttributeError:
3978 except AttributeError:
3979 iosize = r._io.size
3979 iosize = r._io.size
3980 buffer = util.buffer
3980 buffer = util.buffer
3981
3981
3982 chunks = []
3982 chunks = []
3983 ladd = chunks.append
3983 ladd = chunks.append
3984 for idx, item in enumerate(chain):
3984 for idx, item in enumerate(chain):
3985 offset = start(item[0])
3985 offset = start(item[0])
3986 bits = data[idx]
3986 bits = data[idx]
3987 for rev in item:
3987 for rev in item:
3988 chunkstart = start(rev)
3988 chunkstart = start(rev)
3989 if inline:
3989 if inline:
3990 chunkstart += (rev + 1) * iosize
3990 chunkstart += (rev + 1) * iosize
3991 chunklength = length(rev)
3991 chunklength = length(rev)
3992 ladd(buffer(bits, chunkstart - offset, chunklength))
3992 ladd(buffer(bits, chunkstart - offset, chunklength))
3993
3993
3994 return chunks
3994 return chunks
3995
3995
3996 def dodeltachain(rev):
3996 def dodeltachain(rev):
3997 if not cache:
3997 if not cache:
3998 r.clearcaches()
3998 r.clearcaches()
3999 r._deltachain(rev)
3999 r._deltachain(rev)
4000
4000
4001 def doread(chain):
4001 def doread(chain):
4002 if not cache:
4002 if not cache:
4003 r.clearcaches()
4003 r.clearcaches()
4004 for item in slicedchain:
4004 for item in slicedchain:
4005 with lazy_reading(r):
4005 with lazy_reading(r):
4006 segmentforrevs(item[0], item[-1])
4006 segmentforrevs(item[0], item[-1])
4007
4007
4008 def doslice(r, chain, size):
4008 def doslice(r, chain, size):
4009 for s in slicechunk(r, chain, targetsize=size):
4009 for s in slicechunk(r, chain, targetsize=size):
4010 pass
4010 pass
4011
4011
4012 def dorawchunks(data, chain):
4012 def dorawchunks(data, chain):
4013 if not cache:
4013 if not cache:
4014 r.clearcaches()
4014 r.clearcaches()
4015 getrawchunks(data, chain)
4015 getrawchunks(data, chain)
4016
4016
4017 def dodecompress(chunks):
4017 def dodecompress(chunks):
4018 decomp = r.decompress
4018 decomp = r.decompress
4019 for chunk in chunks:
4019 for chunk in chunks:
4020 decomp(chunk)
4020 decomp(chunk)
4021
4021
4022 def dopatch(text, bins):
4022 def dopatch(text, bins):
4023 if not cache:
4023 if not cache:
4024 r.clearcaches()
4024 r.clearcaches()
4025 mdiff.patches(text, bins)
4025 mdiff.patches(text, bins)
4026
4026
4027 def dohash(text):
4027 def dohash(text):
4028 if not cache:
4028 if not cache:
4029 r.clearcaches()
4029 r.clearcaches()
4030 r.checkhash(text, node, rev=rev)
4030 r.checkhash(text, node, rev=rev)
4031
4031
4032 def dorevision():
4032 def dorevision():
4033 if not cache:
4033 if not cache:
4034 r.clearcaches()
4034 r.clearcaches()
4035 r.revision(node)
4035 r.revision(node)
4036
4036
4037 try:
4037 try:
4038 from mercurial.revlogutils.deltas import slicechunk
4038 from mercurial.revlogutils.deltas import slicechunk
4039 except ImportError:
4039 except ImportError:
4040 slicechunk = getattr(revlog, '_slicechunk', None)
4040 slicechunk = getattr(revlog, '_slicechunk', None)
4041
4041
4042 size = r.length(rev)
4042 size = r.length(rev)
4043 chain = r._deltachain(rev)[0]
4043 chain = r._deltachain(rev)[0]
4044
4044
4045 with_sparse_read = False
4045 with_sparse_read = False
4046 if hasattr(r, 'data_config'):
4046 if hasattr(r, 'data_config'):
4047 with_sparse_read = r.data_config.with_sparse_read
4047 with_sparse_read = r.data_config.with_sparse_read
4048 elif hasattr(r, '_withsparseread'):
4048 elif hasattr(r, '_withsparseread'):
4049 with_sparse_read = r._withsparseread
4049 with_sparse_read = r._withsparseread
4050 if with_sparse_read:
4050 if with_sparse_read:
4051 slicedchain = (chain,)
4051 slicedchain = (chain,)
4052 else:
4052 else:
4053 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4053 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4054 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4054 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4055 rawchunks = getrawchunks(data, slicedchain)
4055 rawchunks = getrawchunks(data, slicedchain)
4056 bins = r._inner._chunks(chain)
4056 bins = r._inner._chunks(chain)
4057 text = bytes(bins[0])
4057 text = bytes(bins[0])
4058 bins = bins[1:]
4058 bins = bins[1:]
4059 text = mdiff.patches(text, bins)
4059 text = mdiff.patches(text, bins)
4060
4060
4061 benches = [
4061 benches = [
4062 (lambda: dorevision(), b'full'),
4062 (lambda: dorevision(), b'full'),
4063 (lambda: dodeltachain(rev), b'deltachain'),
4063 (lambda: dodeltachain(rev), b'deltachain'),
4064 (lambda: doread(chain), b'read'),
4064 (lambda: doread(chain), b'read'),
4065 ]
4065 ]
4066
4066
4067 if with_sparse_read:
4067 if with_sparse_read:
4068 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4068 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4069 benches.append(slicing)
4069 benches.append(slicing)
4070
4070
4071 benches.extend(
4071 benches.extend(
4072 [
4072 [
4073 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4073 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4074 (lambda: dodecompress(rawchunks), b'decompress'),
4074 (lambda: dodecompress(rawchunks), b'decompress'),
4075 (lambda: dopatch(text, bins), b'patch'),
4075 (lambda: dopatch(text, bins), b'patch'),
4076 (lambda: dohash(text), b'hash'),
4076 (lambda: dohash(text), b'hash'),
4077 ]
4077 ]
4078 )
4078 )
4079
4079
4080 timer, fm = gettimer(ui, opts)
4080 timer, fm = gettimer(ui, opts)
4081 for fn, title in benches:
4081 for fn, title in benches:
4082 timer(fn, title=title)
4082 timer(fn, title=title)
4083 fm.end()
4083 fm.end()
4084
4084
4085
4085
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. Volatile caches hold
    filtered and obsolescence related data."""
    # NOTE: the docstring previously said ``--clean``; the declared option
    # (and the parameter) is ``--clear``.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # optionally drop the volatile caches so every run pays the full
        # recomputation cost instead of hitting warm state
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # also materialize a changectx per matched revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
4117
4117
4118
4118
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def _bench(compute, setname):
        """build a closure that recomputes `setname` via `compute` from a cold
        volatile-cache state (optionally also dropping the obsstore)"""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)

        return run

    wanted = set(names)

    # obsolescence-related sets first, in sorted order
    for name in sorted(obsolete.cachefuncs):
        if wanted and name not in wanted:
            continue
        timer(_bench(obsolete.getrevs, name), title=name)

    # then the filtered-revision sets, also sorted
    for name in sorted(repoview.filtertable):
        if wanted and name not in wanted:
            continue
        timer(_bench(repoview.filterrevs, name), title=name)
    fm.end()
4166
4166
4167
4167
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        view = repo if filtername is None else repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def run():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()

        return run

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable reading of on-disk branchmap caches; the API to do so has
    # changed across Mercurial versions, so probe from newest to oldest.
    old_branch_cache_from_file = None
    branchcacheread = None
    if util.safehasattr(branchmap, 'branch_cache_from_file'):
        old_branch_cache_from_file = branchmap.branch_cache_from_file
        branchmap.branch_cache_from_file = lambda *args: None
    elif util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    # Disable cache writing as well, again picking the right attribute for
    # the running Mercurial version.
    if util.safehasattr(branchmap, '_LocalBranchCache'):
        branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
    else:
        branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=printname)
    finally:
        # restore whichever read/write hooks we patched above
        if old_branch_cache_from_file is not None:
            branchmap.branch_cache_from_file = old_branch_cache_from_file
        if branchcacheread is not None:
            branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4269
4269
4270
4270
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        bcache = repo.branchmap()
        copy_method = 'copy'

        # BUGFIX: this used to read `copy_base_kwargs = copy_base_kwargs = {}`
        # (same name twice), so `copy_target_kwargs` was never initialized on
        # the code path where `bcache.copy` exists but takes no `repo`
        # argument, causing a NameError in setup(). Initialize both.
        copy_base_kwargs = {}
        copy_target_kwargs = {}
        if hasattr(bcache, 'copy'):
            # newer `copy` signatures take the target repo as an argument
            if 'repo' in getargspec(bcache.copy).args:
                copy_base_kwargs = {"repo": baserepo}
                copy_target_kwargs = {"repo": targetrepo}
        else:
            # even newer versions replaced `copy` with `inherit_for`
            copy_method = 'inherit_for'
            copy_base_kwargs = {"repo": baserepo}
            copy_target_kwargs = {"repo": targetrepo}

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = getattr(base, copy_method)(**copy_target_kwargs)
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4392
4392
4393
4393
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List brachmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    filter = filter or None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # pick the cache-reading entry point matching this Mercurial version,
    # probing from newest to oldest API
    fromfile = getattr(branchmap, 'branch_cache_from_file', None)
    if fromfile is None:
        fromfile = getattr(branchmap.branchcache, 'fromfile', None)
    if fromfile is None:
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the next-larger subset until one is on disk
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def prepare():
        if clearrevlogs:
            clearchangelog(repo)

    def doread():
        fromfile(repo)

    timer(doread, setup=prepare)
    fm.end()
4452
4452
4453
4453
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses the on-disk marker file; len()
        # reports how many markers were decoded
        return len(obsolete.obsstore(repo, svfs))

    timer(countmarkers)
    fm.end()
4463
4463
4464
4464
4465 @command(
4465 @command(
4466 b'perf::lrucachedict|perflrucachedict',
4466 b'perf::lrucachedict|perflrucachedict',
4467 formatteropts
4467 formatteropts
4468 + [
4468 + [
4469 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4469 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4470 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4470 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4471 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4471 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4472 (b'', b'size', 4, b'size of cache'),
4472 (b'', b'size', 4, b'size of cache'),
4473 (b'', b'gets', 10000, b'number of key lookups'),
4473 (b'', b'gets', 10000, b'number of key lookups'),
4474 (b'', b'sets', 10000, b'number of key sets'),
4474 (b'', b'sets', 10000, b'number of key sets'),
4475 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4475 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4476 (
4476 (
4477 b'',
4477 b'',
4478 b'mixedgetfreq',
4478 b'mixedgetfreq',
4479 50,
4479 50,
4480 b'frequency of get vs set ops in mixed mode',
4480 b'frequency of get vs set ops in mixed mode',
4481 ),
4481 ),
4482 ],
4482 ],
4483 norepo=True,
4483 norepo=True,
4484 )
4484 )
4485 def perflrucache(
4485 def perflrucache(
4486 ui,
4486 ui,
4487 mincost=0,
4487 mincost=0,
4488 maxcost=100,
4488 maxcost=100,
4489 costlimit=0,
4489 costlimit=0,
4490 size=4,
4490 size=4,
4491 gets=10000,
4491 gets=10000,
4492 sets=10000,
4492 sets=10000,
4493 mixed=10000,
4493 mixed=10000,
4494 mixedgetfreq=50,
4494 mixedgetfreq=50,
4495 **opts
4495 **opts
4496 ):
4496 ):
4497 opts = _byteskwargs(opts)
4497 opts = _byteskwargs(opts)
4498
4498
4499 def doinit():
4499 def doinit():
4500 for i in _xrange(10000):
4500 for i in _xrange(10000):
4501 util.lrucachedict(size)
4501 util.lrucachedict(size)
4502
4502
4503 costrange = list(range(mincost, maxcost + 1))
4503 costrange = list(range(mincost, maxcost + 1))
4504
4504
4505 values = []
4505 values = []
4506 for i in _xrange(size):
4506 for i in _xrange(size):
4507 values.append(random.randint(0, _maxint))
4507 values.append(random.randint(0, _maxint))
4508
4508
4509 # Get mode fills the cache and tests raw lookup performance with no
4509 # Get mode fills the cache and tests raw lookup performance with no
4510 # eviction.
4510 # eviction.
4511 getseq = []
4511 getseq = []
4512 for i in _xrange(gets):
4512 for i in _xrange(gets):
4513 getseq.append(random.choice(values))
4513 getseq.append(random.choice(values))
4514
4514
4515 def dogets():
4515 def dogets():
4516 d = util.lrucachedict(size)
4516 d = util.lrucachedict(size)
4517 for v in values:
4517 for v in values:
4518 d[v] = v
4518 d[v] = v
4519 for key in getseq:
4519 for key in getseq:
4520 value = d[key]
4520 value = d[key]
4521 value # silence pyflakes warning
4521 value # silence pyflakes warning
4522
4522
4523 def dogetscost():
4523 def dogetscost():
4524 d = util.lrucachedict(size, maxcost=costlimit)
4524 d = util.lrucachedict(size, maxcost=costlimit)
4525 for i, v in enumerate(values):
4525 for i, v in enumerate(values):
4526 d.insert(v, v, cost=costs[i])
4526 d.insert(v, v, cost=costs[i])
4527 for key in getseq:
4527 for key in getseq:
4528 try:
4528 try:
4529 value = d[key]
4529 value = d[key]
4530 value # silence pyflakes warning
4530 value # silence pyflakes warning
4531 except KeyError:
4531 except KeyError:
4532 pass
4532 pass
4533
4533
4534 # Set mode tests insertion speed with cache eviction.
4534 # Set mode tests insertion speed with cache eviction.
4535 setseq = []
4535 setseq = []
4536 costs = []
4536 costs = []
4537 for i in _xrange(sets):
4537 for i in _xrange(sets):
4538 setseq.append(random.randint(0, _maxint))
4538 setseq.append(random.randint(0, _maxint))
4539 costs.append(random.choice(costrange))
4539 costs.append(random.choice(costrange))
4540
4540
4541 def doinserts():
4541 def doinserts():
4542 d = util.lrucachedict(size)
4542 d = util.lrucachedict(size)
4543 for v in setseq:
4543 for v in setseq:
4544 d.insert(v, v)
4544 d.insert(v, v)
4545
4545
4546 def doinsertscost():
4546 def doinsertscost():
4547 d = util.lrucachedict(size, maxcost=costlimit)
4547 d = util.lrucachedict(size, maxcost=costlimit)
4548 for i, v in enumerate(setseq):
4548 for i, v in enumerate(setseq):
4549 d.insert(v, v, cost=costs[i])
4549 d.insert(v, v, cost=costs[i])
4550
4550
4551 def dosets():
4551 def dosets():
4552 d = util.lrucachedict(size)
4552 d = util.lrucachedict(size)
4553 for v in setseq:
4553 for v in setseq:
4554 d[v] = v
4554 d[v] = v
4555
4555
4556 # Mixed mode randomly performs gets and sets with eviction.
4556 # Mixed mode randomly performs gets and sets with eviction.
4557 mixedops = []
4557 mixedops = []
4558 for i in _xrange(mixed):
4558 for i in _xrange(mixed):
4559 r = random.randint(0, 100)
4559 r = random.randint(0, 100)
4560 if r < mixedgetfreq:
4560 if r < mixedgetfreq:
4561 op = 0
4561 op = 0
4562 else:
4562 else:
4563 op = 1
4563 op = 1
4564
4564
4565 mixedops.append(
4565 mixedops.append(
4566 (op, random.randint(0, size * 2), random.choice(costrange))
4566 (op, random.randint(0, size * 2), random.choice(costrange))
4567 )
4567 )
4568
4568
4569 def domixed():
4569 def domixed():
4570 d = util.lrucachedict(size)
4570 d = util.lrucachedict(size)
4571
4571
4572 for op, v, cost in mixedops:
4572 for op, v, cost in mixedops:
4573 if op == 0:
4573 if op == 0:
4574 try:
4574 try:
4575 d[v]
4575 d[v]
4576 except KeyError:
4576 except KeyError:
4577 pass
4577 pass
4578 else:
4578 else:
4579 d[v] = v
4579 d[v] = v
4580
4580
4581 def domixedcost():
4581 def domixedcost():
4582 d = util.lrucachedict(size, maxcost=costlimit)
4582 d = util.lrucachedict(size, maxcost=costlimit)
4583
4583
4584 for op, v, cost in mixedops:
4584 for op, v, cost in mixedops:
4585 if op == 0:
4585 if op == 0:
4586 try:
4586 try:
4587 d[v]
4587 d[v]
4588 except KeyError:
4588 except KeyError:
4589 pass
4589 pass
4590 else:
4590 else:
4591 d.insert(v, v, cost=cost)
4591 d.insert(v, v, cost=cost)
4592
4592
4593 benches = [
4593 benches = [
4594 (doinit, b'init'),
4594 (doinit, b'init'),
4595 ]
4595 ]
4596
4596
4597 if costlimit:
4597 if costlimit:
4598 benches.extend(
4598 benches.extend(
4599 [
4599 [
4600 (dogetscost, b'gets w/ cost limit'),
4600 (dogetscost, b'gets w/ cost limit'),
4601 (doinsertscost, b'inserts w/ cost limit'),
4601 (doinsertscost, b'inserts w/ cost limit'),
4602 (domixedcost, b'mixed w/ cost limit'),
4602 (domixedcost, b'mixed w/ cost limit'),
4603 ]
4603 ]
4604 )
4604 )
4605 else:
4605 else:
4606 benches.extend(
4606 benches.extend(
4607 [
4607 [
4608 (dogets, b'gets'),
4608 (dogets, b'gets'),
4609 (doinserts, b'inserts'),
4609 (doinserts, b'inserts'),
4610 (dosets, b'sets'),
4610 (dosets, b'sets'),
4611 (domixed, b'mixed'),
4611 (domixed, b'mixed'),
4612 ]
4612 ]
4613 )
4613 )
4614
4614
4615 for fn, title in benches:
4615 for fn, title in benches:
4616 timer, fm = gettimer(ui, opts)
4616 timer, fm = gettimer(ui, opts)
4617 timer(fn, title=title)
4617 timer(fn, title=title)
4618 fm.end()
4618 fm.end()
4619
4619
4620
4620
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)

    Times writing ``nlines`` lines, each made of ``nitems`` copies of
    ``item``, through the ui method named by ``--write-method``.  With
    ``--batch-line`` each whole line is passed in a single call;
    ``--flush-line`` flushes after every line.
    """
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        line = item * nitems + b'\n'

    def benchmark():
        # The batch/per-item choice is loop invariant; hoisting the
        # branch out of the per-line loop keeps the harness from timing
        # the test itself.  (The original also reused `i` for both the
        # line loop and the item loop, shadowing the outer index.)
        if batch_line:
            for _line in pycompat.xrange(nlines):
                write(line)
                if flush_line:
                    ui.flush()
        else:
            for _line in pycompat.xrange(nlines):
                for _it in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
                if flush_line:
                    ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4662
4662
4663
4663
def uisetup(ui):
    """Extension setup hook.

    For "historical portability": Mercurial 1.9 (or a79fea6b3e77)
    through 3.7 (or 5606f7d0d063) has cmdutil.openrevlog() but not
    commands.debugrevlogopts.  On those versions the '--dir' option for
    openrevlog() should cause failure, because it has only been
    available since 3.5 (or 49c583ca48c4) — so wrap openrevlog() to
    abort loudly instead of silently ignoring it.
    """
    too_old = util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    )
    if not too_old:
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        # Abort when --dir is requested but this repo has no dirlog.
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, _sysstr(b'openrevlog'), openrevlog)
4683
4683
4684
4684
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    def doprogress():
        # Drive a progress bar from 0 to `total`, one increment per
        # step; the context manager handles completion/cleanup.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer, fm = gettimer(ui, opts)
    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now