##// END OF EJS Templates
perf: fix perf::tags...
marmoute -
r51708:d2f72f70 stable
parent child Browse files
Show More
@@ -1,4448 +1,4448 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged.

    Used as a no-op stand-in for pycompat conversion helpers that do not
    exist in older Mercurial versions.
    """
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
def safehasattr(thing, attr):
    """Return True when *thing* really has attribute *attr*.

    *attr* may be a bytes name; it is converted with ``_sysstr`` before
    the lookup. A private sentinel distinguishes "missing" from any
    legitimate attribute value (including None).
    """
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238
238
239 # for "historical portability":
239 # for "historical portability":
240 # define parsealiases locally, because cmdutil.parsealiases has been
240 # define parsealiases locally, because cmdutil.parsealiases has been
241 # available since 1.5 (or 6252852b4332)
241 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of names in a b"name|alias1|alias2" command spec.

    Local copy because cmdutil.parsealiases has only been available
    since Mercurial 1.5.
    """
    return cmd.split(b'|')
244
244
245
245
246 if safehasattr(registrar, 'command'):
246 if safehasattr(registrar, 'command'):
247 command = registrar.command(cmdtable)
247 command = registrar.command(cmdtable)
248 elif safehasattr(cmdutil, 'command'):
248 elif safehasattr(cmdutil, 'command'):
249 command = cmdutil.command(cmdtable)
249 command = cmdutil.command(cmdtable)
250 if 'norepo' not in getargspec(command).args:
250 if 'norepo' not in getargspec(command).args:
251 # for "historical portability":
251 # for "historical portability":
252 # wrap original cmdutil.command, because "norepo" option has
252 # wrap original cmdutil.command, because "norepo" option has
253 # been available since 3.1 (or 75a96326cecb)
253 # been available since 3.1 (or 75a96326cecb)
254 _command = command
254 _command = command
255
255
256 def command(name, options=(), synopsis=None, norepo=False):
256 def command(name, options=(), synopsis=None, norepo=False):
257 if norepo:
257 if norepo:
258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 return _command(name, list(options), synopsis)
259 return _command(name, list(options), synopsis)
260
260
261
261
262 else:
262 else:
263 # for "historical portability":
263 # for "historical portability":
264 # define "@command" annotation locally, because cmdutil.command
264 # define "@command" annotation locally, because cmdutil.command
265 # has been available since 1.9 (or 2daa5179e73f)
265 # has been available since 1.9 (or 2daa5179e73f)
266 def command(name, options=(), synopsis=None, norepo=False):
266 def command(name, options=(), synopsis=None, norepo=False):
267 def decorator(func):
267 def decorator(func):
268 if synopsis:
268 if synopsis:
269 cmdtable[name] = func, list(options), synopsis
269 cmdtable[name] = func, list(options), synopsis
270 else:
270 else:
271 cmdtable[name] = func, list(options)
271 cmdtable[name] = func, list(options)
272 if norepo:
272 if norepo:
273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 return func
274 return func
275
275
276 return decorator
276 return decorator
277
277
278
278
279 try:
279 try:
280 import mercurial.registrar
280 import mercurial.registrar
281 import mercurial.configitems
281 import mercurial.configitems
282
282
283 configtable = {}
283 configtable = {}
284 configitem = mercurial.registrar.configitem(configtable)
284 configitem = mercurial.registrar.configitem(configtable)
285 configitem(
285 configitem(
286 b'perf',
286 b'perf',
287 b'presleep',
287 b'presleep',
288 default=mercurial.configitems.dynamicdefault,
288 default=mercurial.configitems.dynamicdefault,
289 experimental=True,
289 experimental=True,
290 )
290 )
291 configitem(
291 configitem(
292 b'perf',
292 b'perf',
293 b'stub',
293 b'stub',
294 default=mercurial.configitems.dynamicdefault,
294 default=mercurial.configitems.dynamicdefault,
295 experimental=True,
295 experimental=True,
296 )
296 )
297 configitem(
297 configitem(
298 b'perf',
298 b'perf',
299 b'parentscount',
299 b'parentscount',
300 default=mercurial.configitems.dynamicdefault,
300 default=mercurial.configitems.dynamicdefault,
301 experimental=True,
301 experimental=True,
302 )
302 )
303 configitem(
303 configitem(
304 b'perf',
304 b'perf',
305 b'all-timing',
305 b'all-timing',
306 default=mercurial.configitems.dynamicdefault,
306 default=mercurial.configitems.dynamicdefault,
307 experimental=True,
307 experimental=True,
308 )
308 )
309 configitem(
309 configitem(
310 b'perf',
310 b'perf',
311 b'pre-run',
311 b'pre-run',
312 default=mercurial.configitems.dynamicdefault,
312 default=mercurial.configitems.dynamicdefault,
313 )
313 )
314 configitem(
314 configitem(
315 b'perf',
315 b'perf',
316 b'profile-benchmark',
316 b'profile-benchmark',
317 default=mercurial.configitems.dynamicdefault,
317 default=mercurial.configitems.dynamicdefault,
318 )
318 )
319 configitem(
319 configitem(
320 b'perf',
320 b'perf',
321 b'run-limits',
321 b'run-limits',
322 default=mercurial.configitems.dynamicdefault,
322 default=mercurial.configitems.dynamicdefault,
323 experimental=True,
323 experimental=True,
324 )
324 )
325 except (ImportError, AttributeError):
325 except (ImportError, AttributeError):
326 pass
326 pass
327 except TypeError:
327 except TypeError:
328 # compatibility fix for a11fd395e83f
328 # compatibility fix for a11fd395e83f
329 # hg version: 5.2
329 # hg version: 5.2
330 configitem(
330 configitem(
331 b'perf',
331 b'perf',
332 b'presleep',
332 b'presleep',
333 default=mercurial.configitems.dynamicdefault,
333 default=mercurial.configitems.dynamicdefault,
334 )
334 )
335 configitem(
335 configitem(
336 b'perf',
336 b'perf',
337 b'stub',
337 b'stub',
338 default=mercurial.configitems.dynamicdefault,
338 default=mercurial.configitems.dynamicdefault,
339 )
339 )
340 configitem(
340 configitem(
341 b'perf',
341 b'perf',
342 b'parentscount',
342 b'parentscount',
343 default=mercurial.configitems.dynamicdefault,
343 default=mercurial.configitems.dynamicdefault,
344 )
344 )
345 configitem(
345 configitem(
346 b'perf',
346 b'perf',
347 b'all-timing',
347 b'all-timing',
348 default=mercurial.configitems.dynamicdefault,
348 default=mercurial.configitems.dynamicdefault,
349 )
349 )
350 configitem(
350 configitem(
351 b'perf',
351 b'perf',
352 b'pre-run',
352 b'pre-run',
353 default=mercurial.configitems.dynamicdefault,
353 default=mercurial.configitems.dynamicdefault,
354 )
354 )
355 configitem(
355 configitem(
356 b'perf',
356 b'perf',
357 b'profile-benchmark',
357 b'profile-benchmark',
358 default=mercurial.configitems.dynamicdefault,
358 default=mercurial.configitems.dynamicdefault,
359 )
359 )
360 configitem(
360 configitem(
361 b'perf',
361 b'perf',
362 b'run-limits',
362 b'run-limits',
363 default=mercurial.configitems.dynamicdefault,
363 default=mercurial.configitems.dynamicdefault,
364 )
364 )
365
365
366
366
def getlen(ui):
    """Return a length function honoring the perf.stub config.

    When perf.stub is set every collection is reported as length 1 so
    benchmarks finish quickly during testing; otherwise the builtin
    ``len`` is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
371
371
372
372
class noop:
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
381
381
382
382
383 NOOPCTX = noop()
383 NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        # copy so the redirect does not leak into the caller's ui
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # formatter is falsy, matching plainformatter's behavior
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # Each entry is a b'<seconds>-<runcount>' pair; malformed entries are
    # warned about and skipped rather than aborting the benchmark.
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # optional profiler for the first benchmarked iteration
    # (profiling may be None on Mercurial versions without the module)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot replacement for _timer, used when perf.stub is set.

    Runs the optional *setup* callable and then *func* exactly once,
    with no timing and no reporting; *fm* and *title* are accepted only
    for signature compatibility with _timer and are ignored.
    """
    if setup is not None:
        setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Time a single run of the enclosed block.

    Yields a list; after the block exits, one (wall, user, sys) tuple is
    appended to it. Wall time comes from util.timer, user/system times
    from os.times().
    """
    measurements = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurements
    wall_after = util.timer()
    os_after = os.times()
    user = os_after[0] - os_before[0]
    system = os_after[1] - os_before[1]
    measurements.append((wall_after - wall_before, user, system))
526
526
527
527
528 # list of stop condition (elapsed time, minimal run count)
528 # list of stop condition (elapsed time, minimal run count)
529 DEFAULTLIMITS = (
529 DEFAULTLIMITS = (
530 (3.0, 100),
530 (3.0, 100),
531 (10.0, 3),
531 (10.0, 3),
532 )
532 )
533
533
534
534
@contextlib.contextmanager
def noop_context():
    """Context-manager factory that does nothing (default for _timer)."""
    yield None
538
538
539
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* and report timings through formatter *fm*.

    *func* is run repeatedly (after *prerun* untimed warm-up runs) until
    one of the (elapsed-seconds, min-run-count) pairs in *limits* is
    satisfied. *setup* is called before every run; each run happens
    inside a fresh *context* manager. When *profiler* is given, only the
    first measured iteration is profiled.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: executed but not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # only the first iteration is profiled; disable for the rest
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r holds the last run's return value, displayed alongside timings
    formatone(fm, results, title=title, result=r, displayall=displayall)
582
582
583
583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. Only the best run is shown unless *displayall* is set, in
    which case max, average and median entries are emitted as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        # the b'best' entry uses unprefixed field names for compatibility
        prefix = b'' if role == b'best' else b'%s.' % role
        wall = entry[0]
        user = entry[1]
        system = entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        emit(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        emit(b'median', timings[count // 2])
616
616
617
617
618 # utilities for historical portability
618 # utilities for historical portability
619
619
620
620
def getint(ui, section, name, default):
    """Read an integer config value, staying portable to old Mercurials.

    ui.configint only appeared in 1.9 (fa2b596db182), so fetch the raw
    value through ui.config and convert it ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure 'obj' has attribute 'name' before later setattr calls.

    Aborts when 'obj' lacks 'name', so that a silently-removed attribute
    cannot invalidate the assumptions of a performance measurement.

    The returned helper object offers set(newvalue) to assign a new value
    and restore() to put the original value back.

    With ignoremissing=True a missing attribute returns None instead of
    aborting, which is useful for attributes that only exist in some
    Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    # resolve the str form of the attribute name once for both methods
    attrname = _sysstr(name)
    origvalue = getattr(obj, attrname)

    class attrutil:
        def set(self, newvalue):
            setattr(obj, attrname, newvalue)

        def restore(self):
            setattr(obj, attrname, origvalue)

    return attrutil()
671
671
672
672
673 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
674
674
675
675
def getbranchmapsubsettable():
    """Locate the branch-cache `subsettable` mapping across hg versions.

    The table has moved over time:
      - branchmap since 2.9 (175c6fd8cacc)
      - repoview since 2.5 (59a9f18d4587)
      - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # While bisecting inside bcee63733aad::59a9f18d4587 both branchmap and
    # repoview modules exist but neither carries a subsettable attribute.
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store.

    repo.svfs only exists since 2.3 (7034365089bf); older versions expose
    the same vfs as repo.sopener.
    """
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg.

    repo.vfs only exists since 2.3 (7034365089bf); older versions expose
    the same vfs as repo.opener.
    """
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case;
            # dropping the instance-dict entry forces recomputation.
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
748 # utilities to clear cache
748 # utilities to clear cache
749
749
750
750
def clearfilecache(obj, attrname):
    """Invalidate the filecache-backed property `attrname` on `obj`.

    Operates on the unfiltered object when `obj` supports it, then drops
    both the materialized attribute and its _filecache entry so the value
    is recomputed on next access.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on next access."""
    unfiltered = repo.unfiltered()
    if repo is not unfiltered:
        # filtered repos keep their own changelog cache key/value pair
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfiltered, 'changelog')
765
765
766
766
767 # perf commands
767 # perf commands
768
768
769
769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # Benchmark a full dirstate walk over the files matching `pats`.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})

    def d():
        entries = repo.dirstate.walk(
            m, subrepos=[], unknown=True, ignored=False
        )
        # materialize the walk so its full cost is measured
        return len(list(entries))

    timer(d)
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # Benchmark annotating file `f` at the working directory parent.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    filectx = repo[b'.'][f]

    def d():
        return len(filectx.annotate(True))

    timer(d)
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # --dirstate: time the low-level dirstate.status() call directly,
        # bypassing the localrepo status machinery
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume every status field so the result is fully computed
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer dirstate API: status must run inside the
            # running_status context; invalidate afterwards so state
            # gathered during the benchmark is discarded
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove over the whole working copy.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the quiet flag *before* entering the try block: previously it
    # was assigned as the first statement inside `try`, so an exception
    # raised by that assignment would have produced a NameError in the
    # `finally` clause instead of the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer scmutil.addremove grew a uipathfn argument; pass it
            # when the running Mercurial expects it
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2: no clearcaches(); reset the node lookup cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start each run with cold changelog caches
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
883
883
884
884
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # Benchmark computing the repository's tags; with --clear-revlogs the
    # changelog and manifest caches are refreshed before each run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: start every run from a cold tags cache so the timing
        # covers the full tag computation
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        # deliberately no `return`: the timed function's return value
        # would otherwise be reported as a benchmark "result" line
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
909
909
910
910
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Benchmark a full walk of the ancestors of every changelog head.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the ancestor iterator; the iteration itself is the work
        for _ancestor in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
923
923
924
924
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Benchmark membership tests of `revset` against a lazy ancestor set.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # the `in` test drives the lazy ancestor computation
            rev in ancestors

    timer(d)
    fm.end()
939
939
940
940
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional argument: a revision of the -c/-m revlog;
    # two positional arguments: an explicit file plus a revision of it
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # gather everything needed to rebuild the revisioninfo that would have
    # been seen when this revision was originally added
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None  # no incoming delta: force a full search
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1004
1004
1005
1005
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is filled with a freshly opened peer by the setup function
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve `path` with whichever API this Mercurial offers, newest first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            # oldest fallback: plain path expansion on the ui
            path = ui.expandpath(path)

    def s():
        # setup: reconnect for every run so no per-connection discovery
        # state carries over between measurements
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1032
1032
1033
1033
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # start each run from a cold bookmark cache
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        # touching the property triggers the parse being measured
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1058
1058
1059
1059
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # parsebundlespec moved from exchange to bundlecaches at some point;
    # support both module layouts
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    # revisions may come both as positional arguments and via --rev
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"not revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing set (heads to bundle, bases already "known")
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # derive the changegroup version when the spec does not state one
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only uncompressed bundles are supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only the generation cost is of interest
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1163
1163
1164
1164
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # open + parse the bundle header, then hand the bundle to `fn`
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # drain the (bundle1) stream with fixed-size read() calls
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file-read baseline: no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # drain every bundle2 part with fixed-size read() calls
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once, to pick which benchmarks apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1289
1289
1290
1290
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the generator so the whole group is actually produced
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1326
1326
1327
1327
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark `dirstate.hasdir`, dropping the `_dirs` cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its initial load is not part of the measurement
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cache so the next iteration rebuilds it; some dirstate
        # implementations do not expose `_dirs`, hence the AttributeError guard
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1344
1344
1345
1345
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before any benchmark runs
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1408
1408
1409
1409
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so the initial load is excluded from the timing
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the `_dirs` cache before each run; not every dirstate
        # implementation exposes it, hence the AttributeError guard
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1428
1428
1429
1429
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so the initial load is excluded from the timing
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1449
1449
1450
1450
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so the initial load is excluded from the timing
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        # the `_dirs` cache feeds dirfoldmap; drop it too when present
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1474
1474
1475
1475
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so the initial load is excluded from the timing
    b"a" in ds

    def setup():
        # force the dirstate to consider itself modified so write() does work
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # writing requires the working-copy lock
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1493
1493
1494
1494
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1516
1516
1517
1517
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` for a merge between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1549
1549
1550
1550
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1573
1573
1574
1574
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1588
1588
1589
1589
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop the cached file content so the on-disk
            # read is part of the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1614
1614
1615
1615
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer hg versions expose a path variant API; fall back to pushloc/loc
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # older hg versions only expose the nodemap
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): `.iteritems()` is a py2-era dict API — presumably
    # `remotephases` is a mapping type that still provides it; confirm on py3.
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1678
1678
1679
1679
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # attribute names passed to getattr must be str on Python 3,
                # so probe with a str (not bytes) name
                if util.safehasattr(repo.manifestlog, 'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1723
1723
1724
1724
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a changelog entry for a single revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1737
1737
1738
1738
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the dirstate and its cached ignore matcher so each run
        # rebuilds the ignore rules from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1755
1755
1756
1756
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs turned every key into bytes, so the option must be
        # read with a bytes key (a str key would raise KeyError here)
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1819
1819
1820
1820
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    # --rev is mandatory: there is no sensible default node set for this test
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # modern revlogs expose the lookup on the index (`get_rev`); older
        # ones expose a `nodemap` mapping — support both
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        # timed section: one lookup per requested node
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild a cold nodemap before every timed run

        def setup():
            setnodeget()

    else:
        # build (and prewarm) the nodemap once, outside the timed section
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1891
1891
1892
1892
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # Benchmark interpreter + extension startup by spawning the currently
    # running `hg` binary for a cheap `version -q` call, output discarded.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name != 'nt':
            # neutralize HGRCPATH so user configuration does not skew timings
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1909
1909
1910
1910
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    Returns a ``generate(repo)`` callable producing the stream data for the
    requested ``version`` (``b'v1'``, ``b'v2'``, ``b'v3-exp'`` or
    ``b'latest'``). Aborts when the requested version is unknown or not
    provided by the running Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:
        # fix: this branch previously also defined a dead local wrapper that
        # wrongly called generatev2; v1 is registered directly.
        available[b'v1'] = generatev1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            # keep only the data stream; entry/byte counts are not needed
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        # fix: former message misspelled "unkown"
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
1958
1958
1959
1959
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        # drop the previous run's generator so its cleanup happens outside
        # the timed section
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1993
1993
1994
1994
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fix: help text previously read "stream version to us"
            b'stream version to use ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        # fully drain the stream: generation cost only, chunks discarded
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2025
2025
2026
2026
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # [0] is the open bundle file object, [1] is the temp target directory;
    # the context manager below fills them in for each timed run
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            # stream v2+ arrives wrapped in a bundle2; apply it inside a
            # transaction on the freshly created target repository
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2104
2104
2105
2105
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node identifiers up front so only parents() is timed
    node_list = [repo.changelog.node(i) for i in _xrange(count)]

    def fetch_parents():
        for node in node_list:
            repo.changelog.parents(node)

    timer(fetch_parents)
    fm.end()
2131
2131
2132
2132
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # Benchmark reading a changeset's file list through the changectx layer.
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def read_files():
        len(repo[x].files())

    timer(read_files)
    fm.end()
2144
2144
2145
2145
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # Benchmark reading a changeset's file list straight from the changelog,
    # bypassing the changectx layer (compare with perf::ctxfiles).
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def read_raw_files():
        # field [3] of a parsed changelog entry is the file list
        len(cl.read(x)[3])

    timer(read_raw_files)
    fm.end()
2158
2158
2159
2159
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # Benchmark resolving a revision symbol to its binary node.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
2166
2166
2167
2167
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    # Benchmark applying a deterministic pseudo-random stream of edits to a
    # fresh linelog structure.
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # NOTE: the four randint() calls must stay in this exact order to
        # keep the generated edit stream deterministic across versions
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def apply_edits():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(apply_edits)
    fm.end()
2205
2205
2206
2206
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # Benchmark revset specification parsing and evaluation.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange  # hoist the attribute lookup out of the run

    def run():
        return len(revrange(repo, specs))

    timer(run)
    fm.end()
2214
2214
2215
2215
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # Benchmark a cold node -> rev lookup on a standalone changelog revlog.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # the revlog constructor signature changed over time: try the modern
    # radix-based form first, then fall back to the older indexfile form
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def lookup():
        cl.rev(n)
        clearcaches(cl)  # keep every iteration cold

    timer(lookup)
    fm.end()
2236
2236
2237
2237
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    # Benchmark a full `hg log` run, optionally with copy/rename tracing;
    # output is buffered away so terminal I/O is not part of the timing.
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
2255
2255
2256
2256
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # walk from tip down towards revision 0
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
2273
2273
2274
2274
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into /dev/null so terminal I/O does not skew the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        # timed section: render every requested revision with the template
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
2317
2317
2318
2318
def _displaystats(ui, opts, entries, data):
    """render percentile statistics for each measured quantity

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    ``key`` to a list of ``(value, context)`` tuples; only ``value`` is
    reported (min, various percentiles, max).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # fix: percentile indices must be computed from the number of
        # collected values; the former `len(data)` used the number of
        # measured quantities instead, skewing every percentile.
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2363
2363
2364
2364
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # NOTE(review): the mutable default `revs=[]` is safe here because the
    # name is only rebound below, never mutated in place.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column header, %-template) pairs; the template keys correspond to the
    # `data` dict filled in for every (base, p1, p2) triplet below.
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename counts and timings are never computed,
        # so drop those columns from the report
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # lists of (value, identifying hexes...) tuples, summarized at the
        # end by _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits within the requested set are relevant
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): `data` is built with bytes keys but read back
            # with native-str keys below (and via `fm.data(**data)` /
            # `output % out`) — this relies on bytes == str semantics;
            # verify behavior on Python 3.
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                # also time each parent's path-copies computation separately
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2546
2546
2547
2547
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE(review): the mutable default `revs=[]` is safe — `revs` is only
    # rebound below, never mutated in place.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing two extra columns (rename count and duration) are shown
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # lists of (value, source hex, destination hex) tuples summarized at
        # the end by _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merges are interesting: each (ancestor head, parent) pair is a
    # candidate copy-tracing source/destination
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file to trace: pair is irrelevant for benchmarking
                    continue
                # NOTE(review): `data` mixes bytes keys (here) with str keys
                # ('time', 'nbrenamedfiles' below) and is consumed via
                # `fm.data(**data)` / `output % out` with str template keys;
                # this relies on bytes == str — verify on Python 3.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2686
2686
2687
2687
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # time how long it takes to construct a case-collision auditor over the
    # current dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2694
2694
2695
2695
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark (re)loading the fncache file from the store
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    timer(lambda: store.fncache._load())
    fm.end()
2707
2707
2708
2708
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # benchmark writing the fncache file; done under the repo lock inside a
    # transaction (with a backup) so each timed write behaves like a real one
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def flush_fncache():
        # force the dirty flag each run, otherwise write() would be a no-op
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(flush_fncache)
    tr.close()
    lock.release()
    fm.end()
2727
2727
2728
2728
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark path-encoding every entry currently held in the fncache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all_entries():
        for entry in store.fncache.entries:
            store.encode(entry)

    timer(encode_all_entries)
    fm.end()
2742
2742
2743
2743
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker thread body for the threaded variant of `perfbdiff`.

    Pulls (text1, text2) pairs off queue `q` and diffs each one; a None item
    is the per-run sentinel telling the worker the current batch is over.
    `xdiff`/`blocks` select which diff implementation is exercised. After
    draining a batch the worker parks on the `ready` condition until the main
    thread wakes all workers for the next timing run; `done` being set makes
    the worker exit after its next wake-up.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # same three-way dispatch as the unthreaded path in perfbdiff
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        # wait for the main thread to either start another run or signal
        # shutdown (it sets `done` before notifying)
        with ready:
            ready.wait()
2759
2759
2760
2760
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision text for manifest node `mnode`."""
    manifestlog = repo.manifestlog

    # newer Mercurial exposes getstorage(); older versions only have the
    # revlog attribute directly
    if util.safehasattr(manifestlog, b'getstorage'):
        return manifestlog.getstorage(b'').revision(mnode)
    return manifestlog._revlog.revision(mnode)
2770
2770
2771
2771
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies walking from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the first positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []  # (old text, new text) pairs to be diffed per run

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the exclusive range end `len(r) - 1` means the last
    # revision of the revlog is never benchmarked even when --count reaches
    # it — confirm whether this off-by-one is intentional.
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        # prime the queue with one None sentinel per worker so the workers
        # immediately fall through their first batch and park on `ready`
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # one timing run: feed all pairs plus one sentinel per worker,
            # wake the parked workers, and wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set `done` first so they exit after the
        # final wake-up, then unblock them
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2886
2886
2887
2887
2888 @command(
2888 @command(
2889 b'perf::unbundle',
2889 b'perf::unbundle',
2890 formatteropts,
2890 formatteropts,
2891 b'BUNDLE_FILE',
2891 b'BUNDLE_FILE',
2892 )
2892 )
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The bundle is re-read and a fresh transaction is opened before each run
    (in ``setup``), and any pending transaction is aborted afterwards so the
    repository is left unchanged.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining so the buggy de-inlining code path (see above)
            # can never trigger on affected versions
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [generator, transaction] for the current run
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # BUG FIX: this line previously used `==` (a no-op
                # comparison), so the original quiet level was never
                # restored after the benchmark.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2967
2967
2968
2968
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (old text, new text) pairs collected up front, outside the timed loop
    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            # default mode: diff the revision against its delta parent
            base = r.deltaparent(rev)
            pairs.append((r.revision(base), r.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            pairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            f1 = fctx.revision(change[0][0] or -1)
            f2 = fctx.revision(change[1][0] or -1)
            pairs.append((f1, f2))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3047
3047
3048
3048
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter flag -> commands.diff keyword argument
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark each whitespace-option combination separately
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {flagmap[letter]: b'1' for letter in diffopt}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        if diffopt:
            title = b'diffopts: %s' % (b'-' + diffopt)
        else:
            title = b'diffopts: %s' % b'none'
        timer(d, title=title)
    fm.end()
3072
3072
3073
3073
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    # raw index bytes, re-parsed from scratch by each benchmark closure below
    data = opener.read(indexfile)

    # first 4 bytes: 16-bit flags (high) + 16-bit format version (low)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8 exposed parsing through the revlogio class instead
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes spread across the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # parse a fresh index so each run includes parsing cost consistently
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            # expected for the "missing node" benchmark entry
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3219
3219
3220
3220
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    log = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    nrevs = getlen(ui)(log)

    # allow negative --startrev, counted from the end
    if startrev < 0:
        startrev = nrevs + startrev

    def d():
        log.clearcaches()

        first = startrev
        last = nrevs
        step = opts[b'dist']

        if reverse:
            # walk from tip down to startrev instead
            first, last = last - 1, first - 1
            step = -1 * step

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = log.node(pos)
            log.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3269
3269
3270
3270
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # BUG FIX: the message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: [(rev, [timing-from-pass-1, timing-from-pass-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this entry previously used `resultcount * 70 // 100`,
        # reporting the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3412
3412
3413
3413
class _faketr:
    """Minimal stand-in for a transaction: `add` accepts and ignores entries."""

    def add(self, x, y, z=None):
        return None
3417
3417
3418
3418
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay writes of ``orig``'s revisions [startrev, stoprev] into a
    temporary revlog and time each ``addrawrevision`` call.

    ``source`` selects how the revision data is fed (full text, parent
    delta, ...) — see ``_getrevisionseed``.  ``runidx``, when given, is only
    used to label the progress topic.  Returns a list of ``(rev, timing)``
    pairs, one per revision written.
    """
    timings = []
    # fake transaction: addrawrevision needs one, but we never commit
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # gather the input data OUTSIDE the timed section
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] holds the measured timing for this single write
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3468
3468
3469
3469
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair needed to re-add revision ``rev``.

    The returned pair is suitable for ``addrawrevision(*args, **kwargs)``.
    ``source`` selects how the content is supplied: as a full text
    (b'full'), as a delta against a parent (b'parent-1', b'parent-2',
    b'parent-smallest') or against the delta base used in storage
    (b'storage').  For unknown sources both text and delta stay None.
    """
    from mercurial.node import nullid

    link = orig.linkrev(rev)
    n = orig.node(rev)
    p1, p2 = orig.parents(n)
    revflags = orig.flags(rev)
    delta = None
    fulltext = None

    if source == b'full':
        fulltext = orig.revision(rev)
    elif source == b'parent-1':
        delta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        delta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # keep the parent producing the smallest delta (ties favor p1)
        candidates = [(p1, orig.revdiff(p1, rev))]
        if p2 != nullid:
            candidates.append((p2, orig.revdiff(p2, rev)))
        base, diff = min(candidates, key=lambda c: len(c[1]))
        delta = (orig.rev(base), diff)
    elif source == b'storage':
        storebase = orig.deltaparent(rev)
        delta = (storebase, orig.revdiff(orig.node(storebase), rev))

    return (
        (fulltext, tr, link, p1, p2),
        {'node': n, 'flags': revflags, 'cachedelta': delta},
    )
3510
3510
3511
3511
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable temporary copy of revlog ``orig``.

    The index and data files of ``orig`` are copied into a fresh temporary
    directory and truncated just before revision ``truncaterev``, so that
    revisions ``truncaterev`` and later can be re-added to the yielded
    revlog.  The temporary directory is removed on exit.  Inline revlogs
    are not supported (aborts).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # forward the delta-compression upper bound when the revlog has one
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # '_datafile' is the modern attribute; fall back to pre-5.9 'datafile'
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so the byte offset of
            # revision ``truncaterev`` is a simple multiplication
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern revlog constructor takes a ``radix``; older versions
            # raise TypeError and expect explicit index/data file names
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3572
3572
3573
3573
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine able to compress for revlogs
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a raw file handle on the revlog's storage file (the index
        # for inline revlogs, the data file otherwise).
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # modern revlogs use '_datafile'; fall back to the pre-5.9
            # 'datafile' attribute. (The previous code looked up 'datafile'
            # twice, so the modern attribute was never consulted.)
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3706
3706
3707
3707
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m, the positional argument is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """slice on-disk segments back into per-revision raw chunks"""
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog index implementations
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # pre-4.9 location of the slicing helper
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # precompute inputs shared by the individual phase benchmarks
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3852
3852
3853
3853
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of the volatile revision
    set caches on revset execution.  Volatile caches hold filtering and
    obsolescence related data."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run_once():
        if clear:
            # drop the volatile (filtering/obsolescence) caches so they
            # are rebuilt on every call
            repo.invalidatevolatilesets()
        # exhaust the revset, optionally materializing a changectx per rev
        iterator = repo.set(expr) if contexts else repo.revs(expr)
        for _ignored in iterator:
            pass

    timer(run_once)
    fm.end()
3885
3885
3886
3886
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        """return a benchmark callable recomputing ``setname`` via ``compute``

        The volatile caches (and optionally the obsstore) are dropped before
        each computation so the full cost is measured every run.
        """

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)

        return run

    def selected(candidates):
        """restrict ``candidates`` to the requested names, if any"""
        ordered = sorted(candidates)
        if not names:
            return ordered
        return [n for n in ordered if n in names]

    # obsolescence-related sets first, then repoview filters
    for name in selected(obsolete.cachefuncs):
        timer(makebench(obsolete.getrevs, name), title=name)

    for name in selected(repoview.filtertable):
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3934
3934
3935
3935
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop everything: measure a from-scratch build
                view._branchcaches.clear()
            else:
                # drop only this filter's entry: measure an incremental
                # update on top of the warmed subset caches
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    # (process each filter only after the subset it builds upon)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only the in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4025
4025
4026
4026
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure (setup fills, bench reads)

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update() call will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # build two synthetic repoview filters: one hiding everything outside
    # the base set, one hiding everything outside the target set
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        # repoview filter function: revs hidden in the "base" view
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        # repoview filter function: revs hidden in the "target" view
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the synthetic filters so repo.filtered()
        # can resolve them; removed again in the finally clause below
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                # bring the cached branchmap up to the base state
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found, build the base state from scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the synthetic filters, even on error
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4135
4135
4136
4136
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With `--list`, print the on-disk branchmap cache files and their size
    instead of benchmarking. With `--filter`, benchmark the branchmap of
    that repoview filter (default: the unfiltered repo); if no cache exists
    for the requested filter, fall back along the filter subset chain.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # enumerate cache files; they are named 'branch2' (unfiltered) or
        # 'branch2-<filtername>' for filtered views
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # no cache for this filter: fall back to the next filter subset
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4195
4195
4196
4196
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # constructing an obsstore and taking its length forces a full parse of
    # the on-disk obsolescence markers, which is what we want to measure
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()
4206
4206
4207
4207
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark `util.lrucachedict` init, get, insert, set and mixed ops

    When `--costlimit` is non-zero the cost-aware variants are benchmarked
    instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # measure raw construction cost of an empty cache
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # random keys used to populate the cache
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogets

cost():
        # NOTE: `costs` is assigned further down; the closure resolves it
        # at call time, after it exists
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entries may have been evicted to honor the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain variants are mutually exclusive benchmark sets
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4362
4362
4363
4363
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the ui method under test (write, write_err, status, ...)
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the full line once so only the write call is timed
        line = item * nitems + b'\n'

    def benchmark():
        # fix: the original reused the loop variable `i` for both the outer
        # and inner loops (inner shadowed outer); neither value is used, so
        # both are now the conventional `_`
        for _ in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                # item-at-a-time mode: one write call per item, then newline
                for _ in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4405
4405
4406
4406
def uisetup(ui):
    """extension setup hook run at startup

    On Mercurial versions that have cmdutil.openrevlog() but not yet
    commands.debugrevlogopts, wrap openrevlog() so that the unsupported
    '--dir' option fails loudly instead of being silently ignored.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # repo.dirlog only exists on versions that support --dir
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4425
4425
4426
4426
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one full progress bar from 0 to `total`, one increment at a
        # time, so per-update overhead dominates the measurement
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now