perf: add a new "context" argument to timer...
marmoute
r51569:28620be8 default
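The hunk below adds a `context` keyword argument to the internal `_timer()` helper: a context-manager factory that is entered around every call to the benchmarked function (including the `pre-run` warm-up calls) and that defaults to a new no-op `noop_context()`. Because the context is entered before the timing starts and exited after it stops, its enter/exit cost stays out of the measured interval while whatever it establishes remains in effect during the run. The sketch below shows how a perf command could pass such a context through the `timer` returned by `gettimer()`; the command name `perf::example` and the use of `repo.wlock()` are illustrative assumptions, not part of this changeset.

```python
# Hypothetical usage sketch (not part of this changeset): a perf command that
# keeps the working-copy lock held around each timed run via the new
# ``context`` argument, so acquiring/releasing the lock is not measured.
@command(b'perf::example', formatteropts)
def perfexample(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    @contextlib.contextmanager
    def ctx():
        with repo.wlock():  # entered before timeone() starts the clock
            yield

    def d():
        len(repo.status())

    timer(d, context=ctx)
    fm.end()
```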
@@ -1,4329 +1,4337 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
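As a small aside to the `run-limits` option documented above, here is an illustrative sketch (not from the changeset) of how the default `3.0-100, 10.0-3` pairs translate into the stop check that `_timer()` performs after each measured run; `should_stop` is a hypothetical helper name used only for this example.

```python
# Illustrative sketch: how run-limits pairs drive the stop condition.
# Mirrors the per-run check in _timer(); `should_stop` is a hypothetical
# name, and DEFAULT_LIMITS matches the DEFAULTLIMITS tuple defined below.
DEFAULT_LIMITS = ((3.0, 100), (10.0, 3))  # parsed form of `3.0-100, 10.0-3`

def should_stop(elapsed, count, limits=DEFAULT_LIMITS):
    # stop once any (time, minimal-run-count) pair is satisfied
    return any(elapsed >= t and count >= mincount for t, mincount in limits)

assert not should_stop(2.5, 500)  # under 3s: keep running regardless of count
assert should_stop(3.2, 100)      # past 3s and at least 100 runs: stop
assert should_stop(10.5, 3)       # past 10s and at least 3 runs: stop
```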
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
138 def identity(a):
138 def identity(a):
139 return a
139 return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
192 def safehasattr(thing, attr):
192 def safehasattr(thing, attr):
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238
238
239 # for "historical portability":
239 # for "historical portability":
240 # define parsealiases locally, because cmdutil.parsealiases has been
240 # define parsealiases locally, because cmdutil.parsealiases has been
241 # available since 1.5 (or 6252852b4332)
241 # available since 1.5 (or 6252852b4332)
242 def parsealiases(cmd):
242 def parsealiases(cmd):
243 return cmd.split(b"|")
243 return cmd.split(b"|")
244
244
245
245
246 if safehasattr(registrar, 'command'):
246 if safehasattr(registrar, 'command'):
247 command = registrar.command(cmdtable)
247 command = registrar.command(cmdtable)
248 elif safehasattr(cmdutil, 'command'):
248 elif safehasattr(cmdutil, 'command'):
249 command = cmdutil.command(cmdtable)
249 command = cmdutil.command(cmdtable)
250 if 'norepo' not in getargspec(command).args:
250 if 'norepo' not in getargspec(command).args:
251 # for "historical portability":
251 # for "historical portability":
252 # wrap original cmdutil.command, because "norepo" option has
252 # wrap original cmdutil.command, because "norepo" option has
253 # been available since 3.1 (or 75a96326cecb)
253 # been available since 3.1 (or 75a96326cecb)
254 _command = command
254 _command = command
255
255
256 def command(name, options=(), synopsis=None, norepo=False):
256 def command(name, options=(), synopsis=None, norepo=False):
257 if norepo:
257 if norepo:
258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 return _command(name, list(options), synopsis)
259 return _command(name, list(options), synopsis)
260
260
261
261
262 else:
262 else:
263 # for "historical portability":
263 # for "historical portability":
264 # define "@command" annotation locally, because cmdutil.command
264 # define "@command" annotation locally, because cmdutil.command
265 # has been available since 1.9 (or 2daa5179e73f)
265 # has been available since 1.9 (or 2daa5179e73f)
266 def command(name, options=(), synopsis=None, norepo=False):
266 def command(name, options=(), synopsis=None, norepo=False):
267 def decorator(func):
267 def decorator(func):
268 if synopsis:
268 if synopsis:
269 cmdtable[name] = func, list(options), synopsis
269 cmdtable[name] = func, list(options), synopsis
270 else:
270 else:
271 cmdtable[name] = func, list(options)
271 cmdtable[name] = func, list(options)
272 if norepo:
272 if norepo:
273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 return func
274 return func
275
275
276 return decorator
276 return decorator
277
277
278
278
279 try:
279 try:
280 import mercurial.registrar
280 import mercurial.registrar
281 import mercurial.configitems
281 import mercurial.configitems
282
282
283 configtable = {}
283 configtable = {}
284 configitem = mercurial.registrar.configitem(configtable)
284 configitem = mercurial.registrar.configitem(configtable)
285 configitem(
285 configitem(
286 b'perf',
286 b'perf',
287 b'presleep',
287 b'presleep',
288 default=mercurial.configitems.dynamicdefault,
288 default=mercurial.configitems.dynamicdefault,
289 experimental=True,
289 experimental=True,
290 )
290 )
291 configitem(
291 configitem(
292 b'perf',
292 b'perf',
293 b'stub',
293 b'stub',
294 default=mercurial.configitems.dynamicdefault,
294 default=mercurial.configitems.dynamicdefault,
295 experimental=True,
295 experimental=True,
296 )
296 )
297 configitem(
297 configitem(
298 b'perf',
298 b'perf',
299 b'parentscount',
299 b'parentscount',
300 default=mercurial.configitems.dynamicdefault,
300 default=mercurial.configitems.dynamicdefault,
301 experimental=True,
301 experimental=True,
302 )
302 )
303 configitem(
303 configitem(
304 b'perf',
304 b'perf',
305 b'all-timing',
305 b'all-timing',
306 default=mercurial.configitems.dynamicdefault,
306 default=mercurial.configitems.dynamicdefault,
307 experimental=True,
307 experimental=True,
308 )
308 )
309 configitem(
309 configitem(
310 b'perf',
310 b'perf',
311 b'pre-run',
311 b'pre-run',
312 default=mercurial.configitems.dynamicdefault,
312 default=mercurial.configitems.dynamicdefault,
313 )
313 )
314 configitem(
314 configitem(
315 b'perf',
315 b'perf',
316 b'profile-benchmark',
316 b'profile-benchmark',
317 default=mercurial.configitems.dynamicdefault,
317 default=mercurial.configitems.dynamicdefault,
318 )
318 )
319 configitem(
319 configitem(
320 b'perf',
320 b'perf',
321 b'run-limits',
321 b'run-limits',
322 default=mercurial.configitems.dynamicdefault,
322 default=mercurial.configitems.dynamicdefault,
323 experimental=True,
323 experimental=True,
324 )
324 )
325 except (ImportError, AttributeError):
325 except (ImportError, AttributeError):
326 pass
326 pass
327 except TypeError:
327 except TypeError:
328 # compatibility fix for a11fd395e83f
328 # compatibility fix for a11fd395e83f
329 # hg version: 5.2
329 # hg version: 5.2
330 configitem(
330 configitem(
331 b'perf',
331 b'perf',
332 b'presleep',
332 b'presleep',
333 default=mercurial.configitems.dynamicdefault,
333 default=mercurial.configitems.dynamicdefault,
334 )
334 )
335 configitem(
335 configitem(
336 b'perf',
336 b'perf',
337 b'stub',
337 b'stub',
338 default=mercurial.configitems.dynamicdefault,
338 default=mercurial.configitems.dynamicdefault,
339 )
339 )
340 configitem(
340 configitem(
341 b'perf',
341 b'perf',
342 b'parentscount',
342 b'parentscount',
343 default=mercurial.configitems.dynamicdefault,
343 default=mercurial.configitems.dynamicdefault,
344 )
344 )
345 configitem(
345 configitem(
346 b'perf',
346 b'perf',
347 b'all-timing',
347 b'all-timing',
348 default=mercurial.configitems.dynamicdefault,
348 default=mercurial.configitems.dynamicdefault,
349 )
349 )
350 configitem(
350 configitem(
351 b'perf',
351 b'perf',
352 b'pre-run',
352 b'pre-run',
353 default=mercurial.configitems.dynamicdefault,
353 default=mercurial.configitems.dynamicdefault,
354 )
354 )
355 configitem(
355 configitem(
356 b'perf',
356 b'perf',
357 b'profile-benchmark',
357 b'profile-benchmark',
358 default=mercurial.configitems.dynamicdefault,
358 default=mercurial.configitems.dynamicdefault,
359 )
359 )
360 configitem(
360 configitem(
361 b'perf',
361 b'perf',
362 b'run-limits',
362 b'run-limits',
363 default=mercurial.configitems.dynamicdefault,
363 default=mercurial.configitems.dynamicdefault,
364 )
364 )
365
365
366
366
367 def getlen(ui):
367 def getlen(ui):
368 if ui.configbool(b"perf", b"stub", False):
368 if ui.configbool(b"perf", b"stub", False):
369 return lambda x: 1
369 return lambda x: 1
370 return len
370 return len
371
371
372
372
373 class noop:
373 class noop:
374 """dummy context manager"""
374 """dummy context manager"""
375
375
376 def __enter__(self):
376 def __enter__(self):
377 pass
377 pass
378
378
379 def __exit__(self, *args):
379 def __exit__(self, *args):
380 pass
380 pass
381
381
382
382
383 NOOPCTX = noop()
383 NOOPCTX = noop()
384
384
385
385
386 def gettimer(ui, opts=None):
386 def gettimer(ui, opts=None):
387 """return a timer function and formatter: (timer, formatter)
387 """return a timer function and formatter: (timer, formatter)
388
388
389 This function exists to gather the creation of formatter in a single
389 This function exists to gather the creation of formatter in a single
390 place instead of duplicating it in all performance commands."""
390 place instead of duplicating it in all performance commands."""
391
391
392 # enforce an idle period before execution to counteract power management
392 # enforce an idle period before execution to counteract power management
393 # experimental config: perf.presleep
393 # experimental config: perf.presleep
394 time.sleep(getint(ui, b"perf", b"presleep", 1))
394 time.sleep(getint(ui, b"perf", b"presleep", 1))
395
395
396 if opts is None:
396 if opts is None:
397 opts = {}
397 opts = {}
398 # redirect all to stderr unless buffer api is in use
398 # redirect all to stderr unless buffer api is in use
399 if not ui._buffers:
399 if not ui._buffers:
400 ui = ui.copy()
400 ui = ui.copy()
401 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
401 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
402 if uifout:
402 if uifout:
403 # for "historical portability":
403 # for "historical portability":
404 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
404 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
405 uifout.set(ui.ferr)
405 uifout.set(ui.ferr)
406
406
407 # get a formatter
407 # get a formatter
408 uiformatter = getattr(ui, 'formatter', None)
408 uiformatter = getattr(ui, 'formatter', None)
409 if uiformatter:
409 if uiformatter:
410 fm = uiformatter(b'perf', opts)
410 fm = uiformatter(b'perf', opts)
411 else:
411 else:
412 # for "historical portability":
412 # for "historical portability":
413 # define formatter locally, because ui.formatter has been
413 # define formatter locally, because ui.formatter has been
414 # available since 2.2 (or ae5f92e154d3)
414 # available since 2.2 (or ae5f92e154d3)
415 from mercurial import node
415 from mercurial import node
416
416
417 class defaultformatter:
417 class defaultformatter:
418 """Minimized composition of baseformatter and plainformatter"""
418 """Minimized composition of baseformatter and plainformatter"""
419
419
420 def __init__(self, ui, topic, opts):
420 def __init__(self, ui, topic, opts):
421 self._ui = ui
421 self._ui = ui
422 if ui.debugflag:
422 if ui.debugflag:
423 self.hexfunc = node.hex
423 self.hexfunc = node.hex
424 else:
424 else:
425 self.hexfunc = node.short
425 self.hexfunc = node.short
426
426
427 def __nonzero__(self):
427 def __nonzero__(self):
428 return False
428 return False
429
429
430 __bool__ = __nonzero__
430 __bool__ = __nonzero__
431
431
432 def startitem(self):
432 def startitem(self):
433 pass
433 pass
434
434
435 def data(self, **data):
435 def data(self, **data):
436 pass
436 pass
437
437
438 def write(self, fields, deftext, *fielddata, **opts):
438 def write(self, fields, deftext, *fielddata, **opts):
439 self._ui.write(deftext % fielddata, **opts)
439 self._ui.write(deftext % fielddata, **opts)
440
440
441 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
441 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
442 if cond:
442 if cond:
443 self._ui.write(deftext % fielddata, **opts)
443 self._ui.write(deftext % fielddata, **opts)
444
444
445 def plain(self, text, **opts):
445 def plain(self, text, **opts):
446 self._ui.write(text, **opts)
446 self._ui.write(text, **opts)
447
447
448 def end(self):
448 def end(self):
449 pass
449 pass
450
450
451 fm = defaultformatter(ui, b'perf', opts)
451 fm = defaultformatter(ui, b'perf', opts)
452
452
453 # stub function, runs code only once instead of in a loop
453 # stub function, runs code only once instead of in a loop
454 # experimental config: perf.stub
454 # experimental config: perf.stub
455 if ui.configbool(b"perf", b"stub", False):
455 if ui.configbool(b"perf", b"stub", False):
456 return functools.partial(stub_timer, fm), fm
456 return functools.partial(stub_timer, fm), fm
457
457
458 # experimental config: perf.all-timing
458 # experimental config: perf.all-timing
459 displayall = ui.configbool(b"perf", b"all-timing", False)
459 displayall = ui.configbool(b"perf", b"all-timing", False)
460
460
461 # experimental config: perf.run-limits
461 # experimental config: perf.run-limits
462 limitspec = ui.configlist(b"perf", b"run-limits", [])
462 limitspec = ui.configlist(b"perf", b"run-limits", [])
463 limits = []
463 limits = []
464 for item in limitspec:
464 for item in limitspec:
465 parts = item.split(b'-', 1)
465 parts = item.split(b'-', 1)
466 if len(parts) < 2:
466 if len(parts) < 2:
467 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
467 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
468 continue
468 continue
469 try:
469 try:
470 time_limit = float(_sysstr(parts[0]))
470 time_limit = float(_sysstr(parts[0]))
471 except ValueError as e:
471 except ValueError as e:
472 ui.warn(
472 ui.warn(
473 (
473 (
474 b'malformatted run limit entry, %s: %s\n'
474 b'malformatted run limit entry, %s: %s\n'
475 % (_bytestr(e), item)
475 % (_bytestr(e), item)
476 )
476 )
477 )
477 )
478 continue
478 continue
479 try:
479 try:
480 run_limit = int(_sysstr(parts[1]))
480 run_limit = int(_sysstr(parts[1]))
481 except ValueError as e:
481 except ValueError as e:
482 ui.warn(
482 ui.warn(
483 (
483 (
484 b'malformatted run limit entry, %s: %s\n'
484 b'malformatted run limit entry, %s: %s\n'
485 % (_bytestr(e), item)
485 % (_bytestr(e), item)
486 )
486 )
487 )
487 )
488 continue
488 continue
489 limits.append((time_limit, run_limit))
489 limits.append((time_limit, run_limit))
490 if not limits:
490 if not limits:
491 limits = DEFAULTLIMITS
491 limits = DEFAULTLIMITS
492
492
493 profiler = None
493 profiler = None
494 if profiling is not None:
494 if profiling is not None:
495 if ui.configbool(b"perf", b"profile-benchmark", False):
495 if ui.configbool(b"perf", b"profile-benchmark", False):
496 profiler = profiling.profile(ui)
496 profiler = profiling.profile(ui)
497
497
498 prerun = getint(ui, b"perf", b"pre-run", 0)
498 prerun = getint(ui, b"perf", b"pre-run", 0)
499 t = functools.partial(
499 t = functools.partial(
500 _timer,
500 _timer,
501 fm,
501 fm,
502 displayall=displayall,
502 displayall=displayall,
503 limits=limits,
503 limits=limits,
504 prerun=prerun,
504 prerun=prerun,
505 profiler=profiler,
505 profiler=profiler,
506 )
506 )
507 return t, fm
507 return t, fm
508
508
509
509
510 def stub_timer(fm, func, setup=None, title=None):
510 def stub_timer(fm, func, setup=None, title=None):
511 if setup is not None:
511 if setup is not None:
512 setup()
512 setup()
513 func()
513 func()
514
514
515
515
516 @contextlib.contextmanager
516 @contextlib.contextmanager
517 def timeone():
517 def timeone():
518 r = []
518 r = []
519 ostart = os.times()
519 ostart = os.times()
520 cstart = util.timer()
520 cstart = util.timer()
521 yield r
521 yield r
522 cstop = util.timer()
522 cstop = util.timer()
523 ostop = os.times()
523 ostop = os.times()
524 a, b = ostart, ostop
524 a, b = ostart, ostop
525 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
525 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526
526
527
527
528 # list of stop condition (elapsed time, minimal run count)
528 # list of stop condition (elapsed time, minimal run count)
529 DEFAULTLIMITS = (
529 DEFAULTLIMITS = (
530 (3.0, 100),
530 (3.0, 100),
531 (10.0, 3),
531 (10.0, 3),
532 )
532 )
533
533
534
534
535 @contextlib.contextmanager
536 def noop_context():
537 yield
538
539
535 def _timer(
540 def _timer(
536 fm,
541 fm,
537 func,
542 func,
538 setup=None,
543 setup=None,
544 context=noop_context,
539 title=None,
545 title=None,
540 displayall=False,
546 displayall=False,
541 limits=DEFAULTLIMITS,
547 limits=DEFAULTLIMITS,
542 prerun=0,
548 prerun=0,
543 profiler=None,
549 profiler=None,
544 ):
550 ):
545 gc.collect()
551 gc.collect()
546 results = []
552 results = []
547 begin = util.timer()
553 begin = util.timer()
548 count = 0
554 count = 0
549 if profiler is None:
555 if profiler is None:
550 profiler = NOOPCTX
556 profiler = NOOPCTX
551 for i in range(prerun):
557 for i in range(prerun):
552 if setup is not None:
558 if setup is not None:
553 setup()
559 setup()
560 with context():
554 func()
561 func()
555 keepgoing = True
562 keepgoing = True
556 while keepgoing:
563 while keepgoing:
557 if setup is not None:
564 if setup is not None:
558 setup()
565 setup()
566 with context():
559 with profiler:
567 with profiler:
560 with timeone() as item:
568 with timeone() as item:
561 r = func()
569 r = func()
562 profiler = NOOPCTX
570 profiler = NOOPCTX
563 count += 1
571 count += 1
564 results.append(item[0])
572 results.append(item[0])
565 cstop = util.timer()
573 cstop = util.timer()
566 # Look for a stop condition.
574 # Look for a stop condition.
567 elapsed = cstop - begin
575 elapsed = cstop - begin
568 for t, mincount in limits:
576 for t, mincount in limits:
569 if elapsed >= t and count >= mincount:
577 if elapsed >= t and count >= mincount:
570 keepgoing = False
578 keepgoing = False
571 break
579 break
572
580
573 formatone(fm, results, title=title, result=r, displayall=displayall)
581 formatone(fm, results, title=title, result=r, displayall=displayall)
574
582
575
583
576 def formatone(fm, timings, title=None, result=None, displayall=False):
584 def formatone(fm, timings, title=None, result=None, displayall=False):
577 count = len(timings)
585 count = len(timings)
578
586
579 fm.startitem()
587 fm.startitem()
580
588
581 if title:
589 if title:
582 fm.write(b'title', b'! %s\n', title)
590 fm.write(b'title', b'! %s\n', title)
583 if result:
591 if result:
584 fm.write(b'result', b'! result: %s\n', result)
592 fm.write(b'result', b'! result: %s\n', result)
585
593
586 def display(role, entry):
594 def display(role, entry):
587 prefix = b''
595 prefix = b''
588 if role != b'best':
596 if role != b'best':
589 prefix = b'%s.' % role
597 prefix = b'%s.' % role
590 fm.plain(b'!')
598 fm.plain(b'!')
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
599 fm.write(prefix + b'wall', b' wall %f', entry[0])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
600 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
593 fm.write(prefix + b'user', b' user %f', entry[1])
601 fm.write(prefix + b'user', b' user %f', entry[1])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
602 fm.write(prefix + b'sys', b' sys %f', entry[2])
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
603 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
596 fm.plain(b'\n')
604 fm.plain(b'\n')
597
605
598 timings.sort()
606 timings.sort()
599 min_val = timings[0]
607 min_val = timings[0]
600 display(b'best', min_val)
608 display(b'best', min_val)
601 if displayall:
609 if displayall:
602 max_val = timings[-1]
610 max_val = timings[-1]
603 display(b'max', max_val)
611 display(b'max', max_val)
604 avg = tuple([sum(x) / count for x in zip(*timings)])
612 avg = tuple([sum(x) / count for x in zip(*timings)])
605 display(b'avg', avg)
613 display(b'avg', avg)
606 median = timings[len(timings) // 2]
614 median = timings[len(timings) // 2]
607 display(b'median', median)
615 display(b'median', median)
608
616
609
617
610 # utilities for historical portability
618 # utilities for historical portability
611
619
612
620
613 def getint(ui, section, name, default):
621 def getint(ui, section, name, default):
614 # for "historical portability":
622 # for "historical portability":
615 # ui.configint has been available since 1.9 (or fa2b596db182)
623 # ui.configint has been available since 1.9 (or fa2b596db182)
616 v = ui.config(section, name, None)
624 v = ui.config(section, name, None)
617 if v is None:
625 if v is None:
618 return default
626 return default
619 try:
627 try:
620 return int(v)
628 return int(v)
621 except ValueError:
629 except ValueError:
622 raise error.ConfigError(
630 raise error.ConfigError(
623 b"%s.%s is not an integer ('%s')" % (section, name, v)
631 b"%s.%s is not an integer ('%s')" % (section, name, v)
624 )
632 )
625
633
626
634
627 def safeattrsetter(obj, name, ignoremissing=False):
635 def safeattrsetter(obj, name, ignoremissing=False):
628 """Ensure that 'obj' has 'name' attribute before subsequent setattr
636 """Ensure that 'obj' has 'name' attribute before subsequent setattr
629
637
630 This function is aborted, if 'obj' doesn't have 'name' attribute
638 This function is aborted, if 'obj' doesn't have 'name' attribute
631 at runtime. This avoids overlooking removal of an attribute, which
639 at runtime. This avoids overlooking removal of an attribute, which
632 breaks assumption of performance measurement, in the future.
640 breaks assumption of performance measurement, in the future.
633
641
634 This function returns the object to (1) assign a new value, and
642 This function returns the object to (1) assign a new value, and
635 (2) restore an original value to the attribute.
643 (2) restore an original value to the attribute.
636
644
637 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
645 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
638 abortion, and this function returns None. This is useful to
646 abortion, and this function returns None. This is useful to
639 examine an attribute, which isn't ensured in all Mercurial
647 examine an attribute, which isn't ensured in all Mercurial
640 versions.
648 versions.
641 """
649 """
642 if not util.safehasattr(obj, name):
650 if not util.safehasattr(obj, name):
643 if ignoremissing:
651 if ignoremissing:
644 return None
652 return None
645 raise error.Abort(
653 raise error.Abort(
646 (
654 (
647 b"missing attribute %s of %s might break assumption"
655 b"missing attribute %s of %s might break assumption"
648 b" of performance measurement"
656 b" of performance measurement"
649 )
657 )
650 % (name, obj)
658 % (name, obj)
651 )
659 )
652
660
653 origvalue = getattr(obj, _sysstr(name))
661 origvalue = getattr(obj, _sysstr(name))
654
662
655 class attrutil:
663 class attrutil:
656 def set(self, newvalue):
664 def set(self, newvalue):
657 setattr(obj, _sysstr(name), newvalue)
665 setattr(obj, _sysstr(name), newvalue)
658
666
659 def restore(self):
667 def restore(self):
660 setattr(obj, _sysstr(name), origvalue)
668 setattr(obj, _sysstr(name), origvalue)
661
669
662 return attrutil()
670 return attrutil()
663
671
664
672
665 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
666
674
667
675
668 def getbranchmapsubsettable():
676 def getbranchmapsubsettable():
669 # for "historical portability":
677 # for "historical portability":
670 # subsettable is defined in:
678 # subsettable is defined in:
671 # - branchmap since 2.9 (or 175c6fd8cacc)
679 # - branchmap since 2.9 (or 175c6fd8cacc)
672 # - repoview since 2.5 (or 59a9f18d4587)
680 # - repoview since 2.5 (or 59a9f18d4587)
673 # - repoviewutil since 5.0
681 # - repoviewutil since 5.0
674 for mod in (branchmap, repoview, repoviewutil):
682 for mod in (branchmap, repoview, repoviewutil):
675 subsettable = getattr(mod, 'subsettable', None)
683 subsettable = getattr(mod, 'subsettable', None)
676 if subsettable:
684 if subsettable:
677 return subsettable
685 return subsettable
678
686
679 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
687 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
680 # branchmap and repoview modules exist, but subsettable attribute
688 # branchmap and repoview modules exist, but subsettable attribute
681 # doesn't)
689 # doesn't)
682 raise error.Abort(
690 raise error.Abort(
683 b"perfbranchmap not available with this Mercurial",
691 b"perfbranchmap not available with this Mercurial",
684 hint=b"use 2.5 or later",
692 hint=b"use 2.5 or later",
685 )
693 )
686
694
687
695
688 def getsvfs(repo):
696 def getsvfs(repo):
689 """Return appropriate object to access files under .hg/store"""
697 """Return appropriate object to access files under .hg/store"""
690 # for "historical portability":
698 # for "historical portability":
691 # repo.svfs has been available since 2.3 (or 7034365089bf)
699 # repo.svfs has been available since 2.3 (or 7034365089bf)
692 svfs = getattr(repo, 'svfs', None)
700 svfs = getattr(repo, 'svfs', None)
693 if svfs:
701 if svfs:
694 return svfs
702 return svfs
695 else:
703 else:
696 return getattr(repo, 'sopener')
704 return getattr(repo, 'sopener')
697
705
698
706
699 def getvfs(repo):
707 def getvfs(repo):
700 """Return appropriate object to access files under .hg"""
708 """Return appropriate object to access files under .hg"""
701 # for "historical portability":
709 # for "historical portability":
702 # repo.vfs has been available since 2.3 (or 7034365089bf)
710 # repo.vfs has been available since 2.3 (or 7034365089bf)
703 vfs = getattr(repo, 'vfs', None)
711 vfs = getattr(repo, 'vfs', None)
704 if vfs:
712 if vfs:
705 return vfs
713 return vfs
706 else:
714 else:
707 return getattr(repo, 'opener')
715 return getattr(repo, 'opener')
708
716
709
717
710 def repocleartagscachefunc(repo):
718 def repocleartagscachefunc(repo):
711 """Return the function to clear tags cache according to repo internal API"""
719 """Return the function to clear tags cache according to repo internal API"""
712 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
720 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
713 # in this case, setattr(repo, '_tagscache', None) or so isn't
721 # in this case, setattr(repo, '_tagscache', None) or so isn't
714 # correct way to clear tags cache, because existing code paths
722 # correct way to clear tags cache, because existing code paths
715 # expect _tagscache to be a structured object.
723 # expect _tagscache to be a structured object.
716 def clearcache():
724 def clearcache():
717 # _tagscache has been filteredpropertycache since 2.5 (or
725 # _tagscache has been filteredpropertycache since 2.5 (or
718 # 98c867ac1330), and delattr() can't work in such case
726 # 98c867ac1330), and delattr() can't work in such case
719 if '_tagscache' in vars(repo):
727 if '_tagscache' in vars(repo):
720 del repo.__dict__['_tagscache']
728 del repo.__dict__['_tagscache']
721
729
722 return clearcache
730 return clearcache
723
731
724 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
732 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
725 if repotags: # since 1.4 (or 5614a628d173)
733 if repotags: # since 1.4 (or 5614a628d173)
726 return lambda: repotags.set(None)
734 return lambda: repotags.set(None)
727
735
728 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
736 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
729 if repotagscache: # since 0.6 (or d7df759d0e97)
737 if repotagscache: # since 0.6 (or d7df759d0e97)
730 return lambda: repotagscache.set(None)
738 return lambda: repotagscache.set(None)
731
739
732 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
740 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
733 # this point, but it isn't so problematic, because:
741 # this point, but it isn't so problematic, because:
734 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
742 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
735 # in perftags() causes failure soon
743 # in perftags() causes failure soon
736 # - perf.py itself has been available since 1.1 (or eb240755386d)
744 # - perf.py itself has been available since 1.1 (or eb240755386d)
737 raise error.Abort(b"tags API of this hg command is unknown")
745 raise error.Abort(b"tags API of this hg command is unknown")
738
746
739
747
740 # utilities to clear cache
748 # utilities to clear cache
741
749
742
750
743 def clearfilecache(obj, attrname):
751 def clearfilecache(obj, attrname):
744 unfiltered = getattr(obj, 'unfiltered', None)
752 unfiltered = getattr(obj, 'unfiltered', None)
745 if unfiltered is not None:
753 if unfiltered is not None:
746 obj = obj.unfiltered()
754 obj = obj.unfiltered()
747 if attrname in vars(obj):
755 if attrname in vars(obj):
748 delattr(obj, attrname)
756 delattr(obj, attrname)
749 obj._filecache.pop(attrname, None)
757 obj._filecache.pop(attrname, None)
750
758
751
759
752 def clearchangelog(repo):
760 def clearchangelog(repo):
753 if repo is not repo.unfiltered():
761 if repo is not repo.unfiltered():
754 object.__setattr__(repo, '_clcachekey', None)
762 object.__setattr__(repo, '_clcachekey', None)
755 object.__setattr__(repo, '_clcache', None)
763 object.__setattr__(repo, '_clcache', None)
756 clearfilecache(repo.unfiltered(), 'changelog')
764 clearfilecache(repo.unfiltered(), 'changelog')
757
765
758
766
759 # perf commands
767 # perf commands
760
768
761
769
762 @command(b'perf::walk|perfwalk', formatteropts)
770 @command(b'perf::walk|perfwalk', formatteropts)
763 def perfwalk(ui, repo, *pats, **opts):
771 def perfwalk(ui, repo, *pats, **opts):
764 opts = _byteskwargs(opts)
772 opts = _byteskwargs(opts)
765 timer, fm = gettimer(ui, opts)
773 timer, fm = gettimer(ui, opts)
766 m = scmutil.match(repo[None], pats, {})
774 m = scmutil.match(repo[None], pats, {})
767 timer(
775 timer(
768 lambda: len(
776 lambda: len(
769 list(
777 list(
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
778 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
771 )
779 )
772 )
780 )
773 )
781 )
774 fm.end()
782 fm.end()
775
783
776
784
777 @command(b'perf::annotate|perfannotate', formatteropts)
785 @command(b'perf::annotate|perfannotate', formatteropts)
778 def perfannotate(ui, repo, f, **opts):
786 def perfannotate(ui, repo, f, **opts):
779 opts = _byteskwargs(opts)
787 opts = _byteskwargs(opts)
780 timer, fm = gettimer(ui, opts)
788 timer, fm = gettimer(ui, opts)
781 fc = repo[b'.'][f]
789 fc = repo[b'.'][f]
782 timer(lambda: len(fc.annotate(True)))
790 timer(lambda: len(fc.annotate(True)))
783 fm.end()
791 fm.end()
784
792
785
793
786 @command(
794 @command(
787 b'perf::status|perfstatus',
795 b'perf::status|perfstatus',
788 [
796 [
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
797 (b'u', b'unknown', False, b'ask status to look for unknown files'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
798 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
791 ]
799 ]
792 + formatteropts,
800 + formatteropts,
793 )
801 )
794 def perfstatus(ui, repo, **opts):
802 def perfstatus(ui, repo, **opts):
795 """benchmark the performance of a single status call
803 """benchmark the performance of a single status call
796
804
797 The repository data are preserved between each call.
805 The repository data are preserved between each call.
798
806
799 By default, only the status of the tracked file are requested. If
807 By default, only the status of the tracked file are requested. If
800 `--unknown` is passed, the "unknown" files are also tracked.
808 `--unknown` is passed, the "unknown" files are also tracked.
801 """
809 """
802 opts = _byteskwargs(opts)
810 opts = _byteskwargs(opts)
803 # m = match.always(repo.root, repo.getcwd())
811 # m = match.always(repo.root, repo.getcwd())
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
812 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
805 # False))))
813 # False))))
806 timer, fm = gettimer(ui, opts)
814 timer, fm = gettimer(ui, opts)
807 if opts[b'dirstate']:
815 if opts[b'dirstate']:
808 dirstate = repo.dirstate
816 dirstate = repo.dirstate
809 m = scmutil.matchall(repo)
817 m = scmutil.matchall(repo)
810 unknown = opts[b'unknown']
818 unknown = opts[b'unknown']
811
819
812 def status_dirstate():
820 def status_dirstate():
813 s = dirstate.status(
821 s = dirstate.status(
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
822 m, subrepos=[], ignored=False, clean=False, unknown=unknown
815 )
823 )
816 sum(map(bool, s))
824 sum(map(bool, s))
817
825
818 if util.safehasattr(dirstate, 'running_status'):
826 if util.safehasattr(dirstate, 'running_status'):
819 with dirstate.running_status(repo):
827 with dirstate.running_status(repo):
820 timer(status_dirstate)
828 timer(status_dirstate)
821 dirstate.invalidate()
829 dirstate.invalidate()
822 else:
830 else:
823 timer(status_dirstate)
831 timer(status_dirstate)
824 else:
832 else:
825 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
833 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
826 fm.end()
834 fm.end()
827
835
828
836
829 @command(b'perf::addremove|perfaddremove', formatteropts)
837 @command(b'perf::addremove|perfaddremove', formatteropts)
830 def perfaddremove(ui, repo, **opts):
838 def perfaddremove(ui, repo, **opts):
831 opts = _byteskwargs(opts)
839 opts = _byteskwargs(opts)
832 timer, fm = gettimer(ui, opts)
840 timer, fm = gettimer(ui, opts)
833 try:
841 try:
834 oldquiet = repo.ui.quiet
842 oldquiet = repo.ui.quiet
835 repo.ui.quiet = True
843 repo.ui.quiet = True
836 matcher = scmutil.match(repo[None])
844 matcher = scmutil.match(repo[None])
837 opts[b'dry_run'] = True
845 opts[b'dry_run'] = True
838 if 'uipathfn' in getargspec(scmutil.addremove).args:
846 if 'uipathfn' in getargspec(scmutil.addremove).args:
839 uipathfn = scmutil.getuipathfn(repo)
847 uipathfn = scmutil.getuipathfn(repo)
840 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
848 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
841 else:
849 else:
842 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
850 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
843 finally:
851 finally:
844 repo.ui.quiet = oldquiet
852 repo.ui.quiet = oldquiet
845 fm.end()
853 fm.end()
846
854
847
855
848 def clearcaches(cl):
856 def clearcaches(cl):
849 # behave somewhat consistently across internal API changes
857 # behave somewhat consistently across internal API changes
850 if util.safehasattr(cl, b'clearcaches'):
858 if util.safehasattr(cl, b'clearcaches'):
851 cl.clearcaches()
859 cl.clearcaches()
852 elif util.safehasattr(cl, b'_nodecache'):
860 elif util.safehasattr(cl, b'_nodecache'):
853 # <= hg-5.2
861 # <= hg-5.2
854 from mercurial.node import nullid, nullrev
862 from mercurial.node import nullid, nullrev
855
863
856 cl._nodecache = {nullid: nullrev}
864 cl._nodecache = {nullid: nullrev}
857 cl._nodepos = None
865 cl._nodepos = None
858
866
859
867
860 @command(b'perf::heads|perfheads', formatteropts)
868 @command(b'perf::heads|perfheads', formatteropts)
861 def perfheads(ui, repo, **opts):
869 def perfheads(ui, repo, **opts):
862 """benchmark the computation of a changelog heads"""
870 """benchmark the computation of a changelog heads"""
863 opts = _byteskwargs(opts)
871 opts = _byteskwargs(opts)
864 timer, fm = gettimer(ui, opts)
872 timer, fm = gettimer(ui, opts)
865 cl = repo.changelog
873 cl = repo.changelog
866
874
867 def s():
875 def s():
868 clearcaches(cl)
876 clearcaches(cl)
869
877
870 def d():
878 def d():
871 len(cl.headrevs())
879 len(cl.headrevs())
872
880
873 timer(d, setup=s)
881 timer(d, setup=s)
874 fm.end()
882 fm.end()
875
883
876
884
877 @command(
885 @command(
878 b'perf::tags|perftags',
886 b'perf::tags|perftags',
879 formatteropts
887 formatteropts
880 + [
888 + [
881 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
889 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
882 ],
890 ],
883 )
891 )
884 def perftags(ui, repo, **opts):
892 def perftags(ui, repo, **opts):
885 opts = _byteskwargs(opts)
893 opts = _byteskwargs(opts)
886 timer, fm = gettimer(ui, opts)
894 timer, fm = gettimer(ui, opts)
887 repocleartagscache = repocleartagscachefunc(repo)
895 repocleartagscache = repocleartagscachefunc(repo)
888 clearrevlogs = opts[b'clear_revlogs']
896 clearrevlogs = opts[b'clear_revlogs']
889
897
890 def s():
898 def s():
891 if clearrevlogs:
899 if clearrevlogs:
892 clearchangelog(repo)
900 clearchangelog(repo)
893 clearfilecache(repo.unfiltered(), 'manifest')
901 clearfilecache(repo.unfiltered(), 'manifest')
894 repocleartagscache()
902 repocleartagscache()
895
903
896 def t():
904 def t():
897 return len(repo.tags())
905 return len(repo.tags())
898
906
899 timer(t, setup=s)
907 timer(t, setup=s)
900 fm.end()
908 fm.end()
901
909
902
910
903 @command(b'perf::ancestors|perfancestors', formatteropts)
911 @command(b'perf::ancestors|perfancestors', formatteropts)
904 def perfancestors(ui, repo, **opts):
912 def perfancestors(ui, repo, **opts):
905 opts = _byteskwargs(opts)
913 opts = _byteskwargs(opts)
906 timer, fm = gettimer(ui, opts)
914 timer, fm = gettimer(ui, opts)
907 heads = repo.changelog.headrevs()
915 heads = repo.changelog.headrevs()
908
916
909 def d():
917 def d():
910 for a in repo.changelog.ancestors(heads):
918 for a in repo.changelog.ancestors(heads):
911 pass
919 pass
912
920
913 timer(d)
921 timer(d)
914 fm.end()
922 fm.end()
915
923
916
924
917 @command(b'perf::ancestorset|perfancestorset', formatteropts)
925 @command(b'perf::ancestorset|perfancestorset', formatteropts)
918 def perfancestorset(ui, repo, revset, **opts):
926 def perfancestorset(ui, repo, revset, **opts):
919 opts = _byteskwargs(opts)
927 opts = _byteskwargs(opts)
920 timer, fm = gettimer(ui, opts)
928 timer, fm = gettimer(ui, opts)
921 revs = repo.revs(revset)
929 revs = repo.revs(revset)
922 heads = repo.changelog.headrevs()
930 heads = repo.changelog.headrevs()
923
931
924 def d():
932 def d():
925 s = repo.changelog.ancestors(heads)
933 s = repo.changelog.ancestors(heads)
926 for rev in revs:
934 for rev in revs:
927 rev in s
935 rev in s
928
936
929 timer(d)
937 timer(d)
930 fm.end()
938 fm.end()
931
939
932
940
933 @command(
941 @command(
934 b'perf::delta-find',
942 b'perf::delta-find',
935 revlogopts + formatteropts,
943 revlogopts + formatteropts,
936 b'-c|-m|FILE REV',
944 b'-c|-m|FILE REV',
937 )
945 )
938 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
946 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
939 """benchmark the process of finding a valid delta for a revlog revision
947 """benchmark the process of finding a valid delta for a revlog revision
940
948
941 When a revlog receives a new revision (e.g. from a commit, or from an
949 When a revlog receives a new revision (e.g. from a commit, or from an
942 incoming bundle), it searches for a suitable delta-base to produce a delta.
950 incoming bundle), it searches for a suitable delta-base to produce a delta.
943 This perf command measures how much time we spend in this process. It
951 This perf command measures how much time we spend in this process. It
944 operates on an already stored revision.
952 operates on an already stored revision.
945
953
946 See `hg help debug-delta-find` for another related command.
954 See `hg help debug-delta-find` for another related command.
947 """
955 """
948 from mercurial import revlogutils
956 from mercurial import revlogutils
949 import mercurial.revlogutils.deltas as deltautil
957 import mercurial.revlogutils.deltas as deltautil
950
958
951 opts = _byteskwargs(opts)
959 opts = _byteskwargs(opts)
952 if arg_2 is None:
960 if arg_2 is None:
953 file_ = None
961 file_ = None
954 rev = arg_1
962 rev = arg_1
955 else:
963 else:
956 file_ = arg_1
964 file_ = arg_1
957 rev = arg_2
965 rev = arg_2
958
966
959 repo = repo.unfiltered()
967 repo = repo.unfiltered()
960
968
961 timer, fm = gettimer(ui, opts)
969 timer, fm = gettimer(ui, opts)
962
970
963 rev = int(rev)
971 rev = int(rev)
964
972
965 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
973 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
966
974
967 deltacomputer = deltautil.deltacomputer(revlog)
975 deltacomputer = deltautil.deltacomputer(revlog)
968
976
969 node = revlog.node(rev)
977 node = revlog.node(rev)
970 p1r, p2r = revlog.parentrevs(rev)
978 p1r, p2r = revlog.parentrevs(rev)
971 p1 = revlog.node(p1r)
979 p1 = revlog.node(p1r)
972 p2 = revlog.node(p2r)
980 p2 = revlog.node(p2r)
973 full_text = revlog.revision(rev)
981 full_text = revlog.revision(rev)
974 textlen = len(full_text)
982 textlen = len(full_text)
975 cachedelta = None
983 cachedelta = None
976 flags = revlog.flags(rev)
984 flags = revlog.flags(rev)
977
985
978 revinfo = revlogutils.revisioninfo(
986 revinfo = revlogutils.revisioninfo(
979 node,
987 node,
980 p1,
988 p1,
981 p2,
989 p2,
982 [full_text], # btext
990 [full_text], # btext
983 textlen,
991 textlen,
984 cachedelta,
992 cachedelta,
985 flags,
993 flags,
986 )
994 )
987
995
988 # Note: we should probably purge the potential caches (like the full
996 # Note: we should probably purge the potential caches (like the full
989 # manifest cache) between runs.
997 # manifest cache) between runs.
990 def find_one():
998 def find_one():
991 with revlog._datafp() as fh:
999 with revlog._datafp() as fh:
992 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
1000 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
993
1001
994 timer(find_one)
1002 timer(find_one)
995 fm.end()
1003 fm.end()
996
1004
997
1005
998 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1006 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
999 def perfdiscovery(ui, repo, path, **opts):
1007 def perfdiscovery(ui, repo, path, **opts):
1000 """benchmark discovery between local repo and the peer at given path"""
1008 """benchmark discovery between local repo and the peer at given path"""
1001 repos = [repo, None]
1009 repos = [repo, None]
1002 timer, fm = gettimer(ui, opts)
1010 timer, fm = gettimer(ui, opts)
1003
1011
1004 try:
1012 try:
1005 from mercurial.utils.urlutil import get_unique_pull_path_obj
1013 from mercurial.utils.urlutil import get_unique_pull_path_obj
1006
1014
1007 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1015 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1008 except ImportError:
1016 except ImportError:
1009 try:
1017 try:
1010 from mercurial.utils.urlutil import get_unique_pull_path
1018 from mercurial.utils.urlutil import get_unique_pull_path
1011
1019
1012 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1020 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1013 except ImportError:
1021 except ImportError:
1014 path = ui.expandpath(path)
1022 path = ui.expandpath(path)
1015
1023
1016 def s():
1024 def s():
1017 repos[1] = hg.peer(ui, opts, path)
1025 repos[1] = hg.peer(ui, opts, path)
1018
1026
1019 def d():
1027 def d():
1020 setdiscovery.findcommonheads(ui, *repos)
1028 setdiscovery.findcommonheads(ui, *repos)
1021
1029
1022 timer(d, setup=s)
1030 timer(d, setup=s)
1023 fm.end()
1031 fm.end()
1024
1032
1025
1033
1026 @command(
1034 @command(
1027 b'perf::bookmarks|perfbookmarks',
1035 b'perf::bookmarks|perfbookmarks',
1028 formatteropts
1036 formatteropts
1029 + [
1037 + [
1030 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1038 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1031 ],
1039 ],
1032 )
1040 )
1033 def perfbookmarks(ui, repo, **opts):
1041 def perfbookmarks(ui, repo, **opts):
1034 """benchmark parsing bookmarks from disk to memory"""
1042 """benchmark parsing bookmarks from disk to memory"""
1035 opts = _byteskwargs(opts)
1043 opts = _byteskwargs(opts)
1036 timer, fm = gettimer(ui, opts)
1044 timer, fm = gettimer(ui, opts)
1037
1045
1038 clearrevlogs = opts[b'clear_revlogs']
1046 clearrevlogs = opts[b'clear_revlogs']
1039
1047
1040 def s():
1048 def s():
1041 if clearrevlogs:
1049 if clearrevlogs:
1042 clearchangelog(repo)
1050 clearchangelog(repo)
1043 clearfilecache(repo, b'_bookmarks')
1051 clearfilecache(repo, b'_bookmarks')
1044
1052
1045 def d():
1053 def d():
1046 repo._bookmarks
1054 repo._bookmarks
1047
1055
1048 timer(d, setup=s)
1056 timer(d, setup=s)
1049 fm.end()
1057 fm.end()
1050
1058
1051
1059
1052 @command(
1060 @command(
1053 b'perf::bundle',
1061 b'perf::bundle',
1054 [
1062 [
1055 (
1063 (
1056 b'r',
1064 b'r',
1057 b'rev',
1065 b'rev',
1058 [],
1066 [],
1059 b'changesets to bundle',
1067 b'changesets to bundle',
1060 b'REV',
1068 b'REV',
1061 ),
1069 ),
1062 (
1070 (
1063 b't',
1071 b't',
1064 b'type',
1072 b'type',
1065 b'none',
1073 b'none',
1066 b'bundlespec to use (see `hg help bundlespec`)',
1074 b'bundlespec to use (see `hg help bundlespec`)',
1067 b'TYPE',
1075 b'TYPE',
1068 ),
1076 ),
1069 ]
1077 ]
1070 + formatteropts,
1078 + formatteropts,
1071 b'REVS',
1079 b'REVS',
1072 )
1080 )
1073 def perfbundle(ui, repo, *revs, **opts):
1081 def perfbundle(ui, repo, *revs, **opts):
1074 """benchmark the creation of a bundle from a repository
1082 """benchmark the creation of a bundle from a repository
1075
1083
1076 For now, this only supports "none" compression.
1084 For now, this only supports "none" compression.
1077 """
1085 """
1078 try:
1086 try:
1079 from mercurial import bundlecaches
1087 from mercurial import bundlecaches
1080
1088
1081 parsebundlespec = bundlecaches.parsebundlespec
1089 parsebundlespec = bundlecaches.parsebundlespec
1082 except ImportError:
1090 except ImportError:
1083 from mercurial import exchange
1091 from mercurial import exchange
1084
1092
1085 parsebundlespec = exchange.parsebundlespec
1093 parsebundlespec = exchange.parsebundlespec
1086
1094
1087 from mercurial import discovery
1095 from mercurial import discovery
1088 from mercurial import bundle2
1096 from mercurial import bundle2
1089
1097
1090 opts = _byteskwargs(opts)
1098 opts = _byteskwargs(opts)
1091 timer, fm = gettimer(ui, opts)
1099 timer, fm = gettimer(ui, opts)
1092
1100
1093 cl = repo.changelog
1101 cl = repo.changelog
1094 revs = list(revs)
1102 revs = list(revs)
1095 revs.extend(opts.get(b'rev', ()))
1103 revs.extend(opts.get(b'rev', ()))
1096 revs = scmutil.revrange(repo, revs)
1104 revs = scmutil.revrange(repo, revs)
1097 if not revs:
1105 if not revs:
1098 raise error.Abort(b"no revision specified")
1106 raise error.Abort(b"no revision specified")
1099 # make it a consistent set (ie: without topological gaps)
1107 # make it a consistent set (ie: without topological gaps)
1100 old_len = len(revs)
1108 old_len = len(revs)
1101 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1109 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1102 if old_len != len(revs):
1110 if old_len != len(revs):
1103 new_count = len(revs) - old_len
1111 new_count = len(revs) - old_len
1104 msg = b"add %d new revisions to make it a consistent set\n"
1112 msg = b"add %d new revisions to make it a consistent set\n"
1105 ui.write_err(msg % new_count)
1113 ui.write_err(msg % new_count)
1106
1114
1107 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1115 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1108 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1116 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1109 outgoing = discovery.outgoing(repo, bases, targets)
1117 outgoing = discovery.outgoing(repo, bases, targets)
1110
1118
1111 bundle_spec = opts.get(b'type')
1119 bundle_spec = opts.get(b'type')
1112
1120
1113 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1121 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1114
1122
1115 cgversion = bundle_spec.params.get(b"cg.version")
1123 cgversion = bundle_spec.params.get(b"cg.version")
1116 if cgversion is None:
1124 if cgversion is None:
1117 if bundle_spec.version == b'v1':
1125 if bundle_spec.version == b'v1':
1118 cgversion = b'01'
1126 cgversion = b'01'
1119 if bundle_spec.version == b'v2':
1127 if bundle_spec.version == b'v2':
1120 cgversion = b'02'
1128 cgversion = b'02'
1121 if cgversion not in changegroup.supportedoutgoingversions(repo):
1129 if cgversion not in changegroup.supportedoutgoingversions(repo):
1122 err = b"repository does not support bundle version %s"
1130 err = b"repository does not support bundle version %s"
1123 raise error.Abort(err % cgversion)
1131 raise error.Abort(err % cgversion)
1124
1132
1125 if cgversion == b'01': # bundle1
1133 if cgversion == b'01': # bundle1
1126 bversion = b'HG10' + bundle_spec.wirecompression
1134 bversion = b'HG10' + bundle_spec.wirecompression
1127 bcompression = None
1135 bcompression = None
1128 elif cgversion in (b'02', b'03'):
1136 elif cgversion in (b'02', b'03'):
1129 bversion = b'HG20'
1137 bversion = b'HG20'
1130 bcompression = bundle_spec.wirecompression
1138 bcompression = bundle_spec.wirecompression
1131 else:
1139 else:
1132 err = b'perf::bundle: unexpected changegroup version %s'
1140 err = b'perf::bundle: unexpected changegroup version %s'
1133 raise error.ProgrammingError(err % cgversion)
1141 raise error.ProgrammingError(err % cgversion)
1134
1142
1135 if bcompression is None:
1143 if bcompression is None:
1136 bcompression = b'UN'
1144 bcompression = b'UN'
1137
1145
1138 if bcompression != b'UN':
1146 if bcompression != b'UN':
1139 err = b'perf::bundle: compression currently unsupported: %s'
1147 err = b'perf::bundle: compression currently unsupported: %s'
1140 raise error.ProgrammingError(err % bcompression)
1148 raise error.ProgrammingError(err % bcompression)
1141
1149
1142 def do_bundle():
1150 def do_bundle():
1143 bundle2.writenewbundle(
1151 bundle2.writenewbundle(
1144 ui,
1152 ui,
1145 repo,
1153 repo,
1146 b'perf::bundle',
1154 b'perf::bundle',
1147 os.devnull,
1155 os.devnull,
1148 bversion,
1156 bversion,
1149 outgoing,
1157 outgoing,
1150 bundle_spec.params,
1158 bundle_spec.params,
1151 )
1159 )
1152
1160
1153 timer(do_bundle)
1161 timer(do_bundle)
1154 fm.end()
1162 fm.end()
1155
1163
1156
1164
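# Example invocation of perf::bundle (a sketch; the revset is illustrative
# and, since only "none" compression is supported, an uncompressed bundlespec
# such as `none-v2` is expected):
#
#   $ hg perf::bundle --rev 'tip~100::tip' --type none-v2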
1157 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1165 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1158 def perfbundleread(ui, repo, bundlepath, **opts):
1166 def perfbundleread(ui, repo, bundlepath, **opts):
1159 """Benchmark reading of bundle files.
1167 """Benchmark reading of bundle files.
1160
1168
1161 This command is meant to isolate the I/O part of bundle reading as
1169 This command is meant to isolate the I/O part of bundle reading as
1162 much as possible.
1170 much as possible.
1163 """
1171 """
1164 from mercurial import (
1172 from mercurial import (
1165 bundle2,
1173 bundle2,
1166 exchange,
1174 exchange,
1167 streamclone,
1175 streamclone,
1168 )
1176 )
1169
1177
1170 opts = _byteskwargs(opts)
1178 opts = _byteskwargs(opts)
1171
1179
1172 def makebench(fn):
1180 def makebench(fn):
1173 def run():
1181 def run():
1174 with open(bundlepath, b'rb') as fh:
1182 with open(bundlepath, b'rb') as fh:
1175 bundle = exchange.readbundle(ui, fh, bundlepath)
1183 bundle = exchange.readbundle(ui, fh, bundlepath)
1176 fn(bundle)
1184 fn(bundle)
1177
1185
1178 return run
1186 return run
1179
1187
1180 def makereadnbytes(size):
1188 def makereadnbytes(size):
1181 def run():
1189 def run():
1182 with open(bundlepath, b'rb') as fh:
1190 with open(bundlepath, b'rb') as fh:
1183 bundle = exchange.readbundle(ui, fh, bundlepath)
1191 bundle = exchange.readbundle(ui, fh, bundlepath)
1184 while bundle.read(size):
1192 while bundle.read(size):
1185 pass
1193 pass
1186
1194
1187 return run
1195 return run
1188
1196
1189 def makestdioread(size):
1197 def makestdioread(size):
1190 def run():
1198 def run():
1191 with open(bundlepath, b'rb') as fh:
1199 with open(bundlepath, b'rb') as fh:
1192 while fh.read(size):
1200 while fh.read(size):
1193 pass
1201 pass
1194
1202
1195 return run
1203 return run
1196
1204
1197 # bundle1
1205 # bundle1
1198
1206
1199 def deltaiter(bundle):
1207 def deltaiter(bundle):
1200 for delta in bundle.deltaiter():
1208 for delta in bundle.deltaiter():
1201 pass
1209 pass
1202
1210
1203 def iterchunks(bundle):
1211 def iterchunks(bundle):
1204 for chunk in bundle.getchunks():
1212 for chunk in bundle.getchunks():
1205 pass
1213 pass
1206
1214
1207 # bundle2
1215 # bundle2
1208
1216
1209 def forwardchunks(bundle):
1217 def forwardchunks(bundle):
1210 for chunk in bundle._forwardchunks():
1218 for chunk in bundle._forwardchunks():
1211 pass
1219 pass
1212
1220
1213 def iterparts(bundle):
1221 def iterparts(bundle):
1214 for part in bundle.iterparts():
1222 for part in bundle.iterparts():
1215 pass
1223 pass
1216
1224
1217 def iterpartsseekable(bundle):
1225 def iterpartsseekable(bundle):
1218 for part in bundle.iterparts(seekable=True):
1226 for part in bundle.iterparts(seekable=True):
1219 pass
1227 pass
1220
1228
1221 def seek(bundle):
1229 def seek(bundle):
1222 for part in bundle.iterparts(seekable=True):
1230 for part in bundle.iterparts(seekable=True):
1223 part.seek(0, os.SEEK_END)
1231 part.seek(0, os.SEEK_END)
1224
1232
1225 def makepartreadnbytes(size):
1233 def makepartreadnbytes(size):
1226 def run():
1234 def run():
1227 with open(bundlepath, b'rb') as fh:
1235 with open(bundlepath, b'rb') as fh:
1228 bundle = exchange.readbundle(ui, fh, bundlepath)
1236 bundle = exchange.readbundle(ui, fh, bundlepath)
1229 for part in bundle.iterparts():
1237 for part in bundle.iterparts():
1230 while part.read(size):
1238 while part.read(size):
1231 pass
1239 pass
1232
1240
1233 return run
1241 return run
1234
1242
1235 benches = [
1243 benches = [
1236 (makestdioread(8192), b'read(8k)'),
1244 (makestdioread(8192), b'read(8k)'),
1237 (makestdioread(16384), b'read(16k)'),
1245 (makestdioread(16384), b'read(16k)'),
1238 (makestdioread(32768), b'read(32k)'),
1246 (makestdioread(32768), b'read(32k)'),
1239 (makestdioread(131072), b'read(128k)'),
1247 (makestdioread(131072), b'read(128k)'),
1240 ]
1248 ]
1241
1249
1242 with open(bundlepath, b'rb') as fh:
1250 with open(bundlepath, b'rb') as fh:
1243 bundle = exchange.readbundle(ui, fh, bundlepath)
1251 bundle = exchange.readbundle(ui, fh, bundlepath)
1244
1252
1245 if isinstance(bundle, changegroup.cg1unpacker):
1253 if isinstance(bundle, changegroup.cg1unpacker):
1246 benches.extend(
1254 benches.extend(
1247 [
1255 [
1248 (makebench(deltaiter), b'cg1 deltaiter()'),
1256 (makebench(deltaiter), b'cg1 deltaiter()'),
1249 (makebench(iterchunks), b'cg1 getchunks()'),
1257 (makebench(iterchunks), b'cg1 getchunks()'),
1250 (makereadnbytes(8192), b'cg1 read(8k)'),
1258 (makereadnbytes(8192), b'cg1 read(8k)'),
1251 (makereadnbytes(16384), b'cg1 read(16k)'),
1259 (makereadnbytes(16384), b'cg1 read(16k)'),
1252 (makereadnbytes(32768), b'cg1 read(32k)'),
1260 (makereadnbytes(32768), b'cg1 read(32k)'),
1253 (makereadnbytes(131072), b'cg1 read(128k)'),
1261 (makereadnbytes(131072), b'cg1 read(128k)'),
1254 ]
1262 ]
1255 )
1263 )
1256 elif isinstance(bundle, bundle2.unbundle20):
1264 elif isinstance(bundle, bundle2.unbundle20):
1257 benches.extend(
1265 benches.extend(
1258 [
1266 [
1259 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1267 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1260 (makebench(iterparts), b'bundle2 iterparts()'),
1268 (makebench(iterparts), b'bundle2 iterparts()'),
1261 (
1269 (
1262 makebench(iterpartsseekable),
1270 makebench(iterpartsseekable),
1263 b'bundle2 iterparts() seekable',
1271 b'bundle2 iterparts() seekable',
1264 ),
1272 ),
1265 (makebench(seek), b'bundle2 part seek()'),
1273 (makebench(seek), b'bundle2 part seek()'),
1266 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1274 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1267 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1275 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1268 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1276 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1269 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1277 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1270 ]
1278 ]
1271 )
1279 )
1272 elif isinstance(bundle, streamclone.streamcloneapplier):
1280 elif isinstance(bundle, streamclone.streamcloneapplier):
1273 raise error.Abort(b'stream clone bundles not supported')
1281 raise error.Abort(b'stream clone bundles not supported')
1274 else:
1282 else:
1275 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1283 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1276
1284
1277 for fn, title in benches:
1285 for fn, title in benches:
1278 timer, fm = gettimer(ui, opts)
1286 timer, fm = gettimer(ui, opts)
1279 timer(fn, title=title)
1287 timer(fn, title=title)
1280 fm.end()
1288 fm.end()
1281
1289
1282
1290
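# Example invocation of perf::bundleread (a sketch; the bundle file is
# hypothetical and could be produced with a plain `hg bundle` first):
#
#   $ hg bundle --all --type none-v2 all.hg
#   $ hg perf::bundleread all.hg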
1283 @command(
1291 @command(
1284 b'perf::changegroupchangelog|perfchangegroupchangelog',
1292 b'perf::changegroupchangelog|perfchangegroupchangelog',
1285 formatteropts
1293 formatteropts
1286 + [
1294 + [
1287 (b'', b'cgversion', b'02', b'changegroup version'),
1295 (b'', b'cgversion', b'02', b'changegroup version'),
1288 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1296 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1289 ],
1297 ],
1290 )
1298 )
1291 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1299 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1292 """Benchmark producing a changelog group for a changegroup.
1300 """Benchmark producing a changelog group for a changegroup.
1293
1301
1294 This measures the time spent processing the changelog during a
1302 This measures the time spent processing the changelog during a
1295 bundle operation. This occurs during `hg bundle` and on a server
1303 bundle operation. This occurs during `hg bundle` and on a server
1296 processing a `getbundle` wire protocol request (handles clones
1304 processing a `getbundle` wire protocol request (handles clones
1297 and pull requests).
1305 and pull requests).
1298
1306
1299 By default, all revisions are added to the changegroup.
1307 By default, all revisions are added to the changegroup.
1300 """
1308 """
1301 opts = _byteskwargs(opts)
1309 opts = _byteskwargs(opts)
1302 cl = repo.changelog
1310 cl = repo.changelog
1303 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1311 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1304 bundler = changegroup.getbundler(cgversion, repo)
1312 bundler = changegroup.getbundler(cgversion, repo)
1305
1313
1306 def d():
1314 def d():
1307 state, chunks = bundler._generatechangelog(cl, nodes)
1315 state, chunks = bundler._generatechangelog(cl, nodes)
1308 for chunk in chunks:
1316 for chunk in chunks:
1309 pass
1317 pass
1310
1318
1311 timer, fm = gettimer(ui, opts)
1319 timer, fm = gettimer(ui, opts)
1312
1320
1313 # Terminal printing can interfere with timing. So disable it.
1321 # Terminal printing can interfere with timing. So disable it.
1314 with ui.configoverride({(b'progress', b'disable'): True}):
1322 with ui.configoverride({(b'progress', b'disable'): True}):
1315 timer(d)
1323 timer(d)
1316
1324
1317 fm.end()
1325 fm.end()
1318
1326
1319
1327
1320 @command(b'perf::dirs|perfdirs', formatteropts)
1328 @command(b'perf::dirs|perfdirs', formatteropts)
1321 def perfdirs(ui, repo, **opts):
1329 def perfdirs(ui, repo, **opts):
1322 opts = _byteskwargs(opts)
1330 opts = _byteskwargs(opts)
1323 timer, fm = gettimer(ui, opts)
1331 timer, fm = gettimer(ui, opts)
1324 dirstate = repo.dirstate
1332 dirstate = repo.dirstate
1325 b'a' in dirstate
1333 b'a' in dirstate
1326
1334
1327 def d():
1335 def d():
1328 dirstate.hasdir(b'a')
1336 dirstate.hasdir(b'a')
1329 try:
1337 try:
1330 del dirstate._map._dirs
1338 del dirstate._map._dirs
1331 except AttributeError:
1339 except AttributeError:
1332 pass
1340 pass
1333
1341
1334 timer(d)
1342 timer(d)
1335 fm.end()
1343 fm.end()
1336
1344
1337
1345
1338 @command(
1346 @command(
1339 b'perf::dirstate|perfdirstate',
1347 b'perf::dirstate|perfdirstate',
1340 [
1348 [
1341 (
1349 (
1342 b'',
1350 b'',
1343 b'iteration',
1351 b'iteration',
1344 None,
1352 None,
1345 b'benchmark a full iteration for the dirstate',
1353 b'benchmark a full iteration for the dirstate',
1346 ),
1354 ),
1347 (
1355 (
1348 b'',
1356 b'',
1349 b'contains',
1357 b'contains',
1350 None,
1358 None,
1351 b'benchmark a large amount of `nf in dirstate` calls',
1359 b'benchmark a large amount of `nf in dirstate` calls',
1352 ),
1360 ),
1353 ]
1361 ]
1354 + formatteropts,
1362 + formatteropts,
1355 )
1363 )
1356 def perfdirstate(ui, repo, **opts):
1364 def perfdirstate(ui, repo, **opts):
1357 """benchmap the time of various distate operations
1365 """benchmap the time of various distate operations
1358
1366
1359 By default benchmark the time necessary to load a dirstate from scratch.
1367 By default benchmark the time necessary to load a dirstate from scratch.
1360 The dirstate is loaded to the point where a "contains" request can be
1368 The dirstate is loaded to the point where a "contains" request can be
1361 answered.
1369 answered.
1362 """
1370 """
1363 opts = _byteskwargs(opts)
1371 opts = _byteskwargs(opts)
1364 timer, fm = gettimer(ui, opts)
1372 timer, fm = gettimer(ui, opts)
1365 b"a" in repo.dirstate
1373 b"a" in repo.dirstate
1366
1374
1367 if opts[b'iteration'] and opts[b'contains']:
1375 if opts[b'iteration'] and opts[b'contains']:
1368 msg = b'only specify one of --iteration or --contains'
1376 msg = b'only specify one of --iteration or --contains'
1369 raise error.Abort(msg)
1377 raise error.Abort(msg)
1370
1378
1371 if opts[b'iteration']:
1379 if opts[b'iteration']:
1372 setup = None
1380 setup = None
1373 dirstate = repo.dirstate
1381 dirstate = repo.dirstate
1374
1382
1375 def d():
1383 def d():
1376 for f in dirstate:
1384 for f in dirstate:
1377 pass
1385 pass
1378
1386
1379 elif opts[b'contains']:
1387 elif opts[b'contains']:
1380 setup = None
1388 setup = None
1381 dirstate = repo.dirstate
1389 dirstate = repo.dirstate
1382 allfiles = list(dirstate)
1390 allfiles = list(dirstate)
1383 # also add file path that will be "missing" from the dirstate
1391 # also add file path that will be "missing" from the dirstate
1384 allfiles.extend([f[::-1] for f in allfiles])
1392 allfiles.extend([f[::-1] for f in allfiles])
1385
1393
1386 def d():
1394 def d():
1387 for f in allfiles:
1395 for f in allfiles:
1388 f in dirstate
1396 f in dirstate
1389
1397
1390 else:
1398 else:
1391
1399
1392 def setup():
1400 def setup():
1393 repo.dirstate.invalidate()
1401 repo.dirstate.invalidate()
1394
1402
1395 def d():
1403 def d():
1396 b"a" in repo.dirstate
1404 b"a" in repo.dirstate
1397
1405
1398 timer(d, setup=setup)
1406 timer(d, setup=setup)
1399 fm.end()
1407 fm.end()
1400
1408
1401
1409
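# Example invocations of perf::dirstate (a sketch; --iteration and --contains
# cannot be combined, as enforced above):
#
#   $ hg perf::dirstate              # default: cold dirstate load
#   $ hg perf::dirstate --iteration  # full iteration over the dirstate
#   $ hg perf::dirstate --contains   # many `nf in dirstate` lookups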
1402 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1410 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1403 def perfdirstatedirs(ui, repo, **opts):
1411 def perfdirstatedirs(ui, repo, **opts):
1404 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1412 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1405 opts = _byteskwargs(opts)
1413 opts = _byteskwargs(opts)
1406 timer, fm = gettimer(ui, opts)
1414 timer, fm = gettimer(ui, opts)
1407 repo.dirstate.hasdir(b"a")
1415 repo.dirstate.hasdir(b"a")
1408
1416
1409 def setup():
1417 def setup():
1410 try:
1418 try:
1411 del repo.dirstate._map._dirs
1419 del repo.dirstate._map._dirs
1412 except AttributeError:
1420 except AttributeError:
1413 pass
1421 pass
1414
1422
1415 def d():
1423 def d():
1416 repo.dirstate.hasdir(b"a")
1424 repo.dirstate.hasdir(b"a")
1417
1425
1418 timer(d, setup=setup)
1426 timer(d, setup=setup)
1419 fm.end()
1427 fm.end()
1420
1428
1421
1429
1422 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1430 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1423 def perfdirstatefoldmap(ui, repo, **opts):
1431 def perfdirstatefoldmap(ui, repo, **opts):
1424 """benchmap a `dirstate._map.filefoldmap.get()` request
1432 """benchmap a `dirstate._map.filefoldmap.get()` request
1425
1433
1426 The dirstate filefoldmap cache is dropped between every request.
1434 The dirstate filefoldmap cache is dropped between every request.
1427 """
1435 """
1428 opts = _byteskwargs(opts)
1436 opts = _byteskwargs(opts)
1429 timer, fm = gettimer(ui, opts)
1437 timer, fm = gettimer(ui, opts)
1430 dirstate = repo.dirstate
1438 dirstate = repo.dirstate
1431 dirstate._map.filefoldmap.get(b'a')
1439 dirstate._map.filefoldmap.get(b'a')
1432
1440
1433 def setup():
1441 def setup():
1434 del dirstate._map.filefoldmap
1442 del dirstate._map.filefoldmap
1435
1443
1436 def d():
1444 def d():
1437 dirstate._map.filefoldmap.get(b'a')
1445 dirstate._map.filefoldmap.get(b'a')
1438
1446
1439 timer(d, setup=setup)
1447 timer(d, setup=setup)
1440 fm.end()
1448 fm.end()
1441
1449
1442
1450
1443 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1451 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1444 def perfdirfoldmap(ui, repo, **opts):
1452 def perfdirfoldmap(ui, repo, **opts):
1445 """benchmap a `dirstate._map.dirfoldmap.get()` request
1453 """benchmap a `dirstate._map.dirfoldmap.get()` request
1446
1454
1447 The dirstate dirfoldmap cache is dropped between every request.
1455 The dirstate dirfoldmap cache is dropped between every request.
1448 """
1456 """
1449 opts = _byteskwargs(opts)
1457 opts = _byteskwargs(opts)
1450 timer, fm = gettimer(ui, opts)
1458 timer, fm = gettimer(ui, opts)
1451 dirstate = repo.dirstate
1459 dirstate = repo.dirstate
1452 dirstate._map.dirfoldmap.get(b'a')
1460 dirstate._map.dirfoldmap.get(b'a')
1453
1461
1454 def setup():
1462 def setup():
1455 del dirstate._map.dirfoldmap
1463 del dirstate._map.dirfoldmap
1456 try:
1464 try:
1457 del dirstate._map._dirs
1465 del dirstate._map._dirs
1458 except AttributeError:
1466 except AttributeError:
1459 pass
1467 pass
1460
1468
1461 def d():
1469 def d():
1462 dirstate._map.dirfoldmap.get(b'a')
1470 dirstate._map.dirfoldmap.get(b'a')
1463
1471
1464 timer(d, setup=setup)
1472 timer(d, setup=setup)
1465 fm.end()
1473 fm.end()
1466
1474
1467
1475
1468 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1476 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1469 def perfdirstatewrite(ui, repo, **opts):
1477 def perfdirstatewrite(ui, repo, **opts):
1470 """benchmap the time it take to write a dirstate on disk"""
1478 """benchmap the time it take to write a dirstate on disk"""
1471 opts = _byteskwargs(opts)
1479 opts = _byteskwargs(opts)
1472 timer, fm = gettimer(ui, opts)
1480 timer, fm = gettimer(ui, opts)
1473 ds = repo.dirstate
1481 ds = repo.dirstate
1474 b"a" in ds
1482 b"a" in ds
1475
1483
1476 def setup():
1484 def setup():
1477 ds._dirty = True
1485 ds._dirty = True
1478
1486
1479 def d():
1487 def d():
1480 ds.write(repo.currenttransaction())
1488 ds.write(repo.currenttransaction())
1481
1489
1482 with repo.wlock():
1490 with repo.wlock():
1483 timer(d, setup=setup)
1491 timer(d, setup=setup)
1484 fm.end()
1492 fm.end()
1485
1493
1486
1494
1487 def _getmergerevs(repo, opts):
1495 def _getmergerevs(repo, opts):
1488 """parse command argument to return rev involved in merge
1496 """parse command argument to return rev involved in merge
1489
1497
1490 input: options dictionary with `rev`, `from` and `base`
1498 input: options dictionary with `rev`, `from` and `base`
1491 output: (localctx, otherctx, basectx)
1499 output: (localctx, otherctx, basectx)
1492 """
1500 """
1493 if opts[b'from']:
1501 if opts[b'from']:
1494 fromrev = scmutil.revsingle(repo, opts[b'from'])
1502 fromrev = scmutil.revsingle(repo, opts[b'from'])
1495 wctx = repo[fromrev]
1503 wctx = repo[fromrev]
1496 else:
1504 else:
1497 wctx = repo[None]
1505 wctx = repo[None]
1498 # we don't want working dir files to be stat'd in the benchmark, so
1506 # we don't want working dir files to be stat'd in the benchmark, so
1499 # prime that cache
1507 # prime that cache
1500 wctx.dirty()
1508 wctx.dirty()
1501 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1509 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1502 if opts[b'base']:
1510 if opts[b'base']:
1503 fromrev = scmutil.revsingle(repo, opts[b'base'])
1511 fromrev = scmutil.revsingle(repo, opts[b'base'])
1504 ancestor = repo[fromrev]
1512 ancestor = repo[fromrev]
1505 else:
1513 else:
1506 ancestor = wctx.ancestor(rctx)
1514 ancestor = wctx.ancestor(rctx)
1507 return (wctx, rctx, ancestor)
1515 return (wctx, rctx, ancestor)
1508
1516
1509
1517
1510 @command(
1518 @command(
1511 b'perf::mergecalculate|perfmergecalculate',
1519 b'perf::mergecalculate|perfmergecalculate',
1512 [
1520 [
1513 (b'r', b'rev', b'.', b'rev to merge against'),
1521 (b'r', b'rev', b'.', b'rev to merge against'),
1514 (b'', b'from', b'', b'rev to merge from'),
1522 (b'', b'from', b'', b'rev to merge from'),
1515 (b'', b'base', b'', b'the revision to use as base'),
1523 (b'', b'base', b'', b'the revision to use as base'),
1516 ]
1524 ]
1517 + formatteropts,
1525 + formatteropts,
1518 )
1526 )
1519 def perfmergecalculate(ui, repo, **opts):
1527 def perfmergecalculate(ui, repo, **opts):
1520 opts = _byteskwargs(opts)
1528 opts = _byteskwargs(opts)
1521 timer, fm = gettimer(ui, opts)
1529 timer, fm = gettimer(ui, opts)
1522
1530
1523 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1531 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1524
1532
1525 def d():
1533 def d():
1526 # acceptremote is True because we don't want prompts in the middle of
1534 # acceptremote is True because we don't want prompts in the middle of
1527 # our benchmark
1535 # our benchmark
1528 merge.calculateupdates(
1536 merge.calculateupdates(
1529 repo,
1537 repo,
1530 wctx,
1538 wctx,
1531 rctx,
1539 rctx,
1532 [ancestor],
1540 [ancestor],
1533 branchmerge=False,
1541 branchmerge=False,
1534 force=False,
1542 force=False,
1535 acceptremote=True,
1543 acceptremote=True,
1536 followcopies=True,
1544 followcopies=True,
1537 )
1545 )
1538
1546
1539 timer(d)
1547 timer(d)
1540 fm.end()
1548 fm.end()
1541
1549
1542
1550
1543 @command(
1551 @command(
1544 b'perf::mergecopies|perfmergecopies',
1552 b'perf::mergecopies|perfmergecopies',
1545 [
1553 [
1546 (b'r', b'rev', b'.', b'rev to merge against'),
1554 (b'r', b'rev', b'.', b'rev to merge against'),
1547 (b'', b'from', b'', b'rev to merge from'),
1555 (b'', b'from', b'', b'rev to merge from'),
1548 (b'', b'base', b'', b'the revision to use as base'),
1556 (b'', b'base', b'', b'the revision to use as base'),
1549 ]
1557 ]
1550 + formatteropts,
1558 + formatteropts,
1551 )
1559 )
1552 def perfmergecopies(ui, repo, **opts):
1560 def perfmergecopies(ui, repo, **opts):
1553 """measure runtime of `copies.mergecopies`"""
1561 """measure runtime of `copies.mergecopies`"""
1554 opts = _byteskwargs(opts)
1562 opts = _byteskwargs(opts)
1555 timer, fm = gettimer(ui, opts)
1563 timer, fm = gettimer(ui, opts)
1556 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1564 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1557
1565
1558 def d():
1566 def d():
1559 # acceptremote is True because we don't want prompts in the middle of
1567 # acceptremote is True because we don't want prompts in the middle of
1560 # our benchmark
1568 # our benchmark
1561 copies.mergecopies(repo, wctx, rctx, ancestor)
1569 copies.mergecopies(repo, wctx, rctx, ancestor)
1562
1570
1563 timer(d)
1571 timer(d)
1564 fm.end()
1572 fm.end()
1565
1573
1566
1574
1567 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1575 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1568 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1576 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1569 """benchmark the copy tracing logic"""
1577 """benchmark the copy tracing logic"""
1570 opts = _byteskwargs(opts)
1578 opts = _byteskwargs(opts)
1571 timer, fm = gettimer(ui, opts)
1579 timer, fm = gettimer(ui, opts)
1572 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1580 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1573 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1581 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1574
1582
1575 def d():
1583 def d():
1576 copies.pathcopies(ctx1, ctx2)
1584 copies.pathcopies(ctx1, ctx2)
1577
1585
1578 timer(d)
1586 timer(d)
1579 fm.end()
1587 fm.end()
1580
1588
1581
1589
1582 @command(
1590 @command(
1583 b'perf::phases|perfphases',
1591 b'perf::phases|perfphases',
1584 [
1592 [
1585 (b'', b'full', False, b'include file reading time too'),
1593 (b'', b'full', False, b'include file reading time too'),
1586 ],
1594 ],
1587 b"",
1595 b"",
1588 )
1596 )
1589 def perfphases(ui, repo, **opts):
1597 def perfphases(ui, repo, **opts):
1590 """benchmark phasesets computation"""
1598 """benchmark phasesets computation"""
1591 opts = _byteskwargs(opts)
1599 opts = _byteskwargs(opts)
1592 timer, fm = gettimer(ui, opts)
1600 timer, fm = gettimer(ui, opts)
1593 _phases = repo._phasecache
1601 _phases = repo._phasecache
1594 full = opts.get(b'full')
1602 full = opts.get(b'full')
1595
1603
1596 def d():
1604 def d():
1597 phases = _phases
1605 phases = _phases
1598 if full:
1606 if full:
1599 clearfilecache(repo, b'_phasecache')
1607 clearfilecache(repo, b'_phasecache')
1600 phases = repo._phasecache
1608 phases = repo._phasecache
1601 phases.invalidate()
1609 phases.invalidate()
1602 phases.loadphaserevs(repo)
1610 phases.loadphaserevs(repo)
1603
1611
1604 timer(d)
1612 timer(d)
1605 fm.end()
1613 fm.end()
1606
1614
1607
1615
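# Example invocation of perf::phases (a sketch; `--full` also includes the
# file reading time, per the option description above):
#
#   $ hg perf::phases --full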
1608 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1616 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1609 def perfphasesremote(ui, repo, dest=None, **opts):
1617 def perfphasesremote(ui, repo, dest=None, **opts):
1610 """benchmark time needed to analyse phases of the remote server"""
1618 """benchmark time needed to analyse phases of the remote server"""
1611 from mercurial.node import bin
1619 from mercurial.node import bin
1612 from mercurial import (
1620 from mercurial import (
1613 exchange,
1621 exchange,
1614 hg,
1622 hg,
1615 phases,
1623 phases,
1616 )
1624 )
1617
1625
1618 opts = _byteskwargs(opts)
1626 opts = _byteskwargs(opts)
1619 timer, fm = gettimer(ui, opts)
1627 timer, fm = gettimer(ui, opts)
1620
1628
1621 path = ui.getpath(dest, default=(b'default-push', b'default'))
1629 path = ui.getpath(dest, default=(b'default-push', b'default'))
1622 if not path:
1630 if not path:
1623 raise error.Abort(
1631 raise error.Abort(
1624 b'default repository not configured!',
1632 b'default repository not configured!',
1625 hint=b"see 'hg help config.paths'",
1633 hint=b"see 'hg help config.paths'",
1626 )
1634 )
1627 if util.safehasattr(path, 'main_path'):
1635 if util.safehasattr(path, 'main_path'):
1628 path = path.get_push_variant()
1636 path = path.get_push_variant()
1629 dest = path.loc
1637 dest = path.loc
1630 else:
1638 else:
1631 dest = path.pushloc or path.loc
1639 dest = path.pushloc or path.loc
1632 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1640 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1633 other = hg.peer(repo, opts, dest)
1641 other = hg.peer(repo, opts, dest)
1634
1642
1635 # easier to perform discovery through the operation
1643 # easier to perform discovery through the operation
1636 op = exchange.pushoperation(repo, other)
1644 op = exchange.pushoperation(repo, other)
1637 exchange._pushdiscoverychangeset(op)
1645 exchange._pushdiscoverychangeset(op)
1638
1646
1639 remotesubset = op.fallbackheads
1647 remotesubset = op.fallbackheads
1640
1648
1641 with other.commandexecutor() as e:
1649 with other.commandexecutor() as e:
1642 remotephases = e.callcommand(
1650 remotephases = e.callcommand(
1643 b'listkeys', {b'namespace': b'phases'}
1651 b'listkeys', {b'namespace': b'phases'}
1644 ).result()
1652 ).result()
1645 del other
1653 del other
1646 publishing = remotephases.get(b'publishing', False)
1654 publishing = remotephases.get(b'publishing', False)
1647 if publishing:
1655 if publishing:
1648 ui.statusnoi18n(b'publishing: yes\n')
1656 ui.statusnoi18n(b'publishing: yes\n')
1649 else:
1657 else:
1650 ui.statusnoi18n(b'publishing: no\n')
1658 ui.statusnoi18n(b'publishing: no\n')
1651
1659
1652 has_node = getattr(repo.changelog.index, 'has_node', None)
1660 has_node = getattr(repo.changelog.index, 'has_node', None)
1653 if has_node is None:
1661 if has_node is None:
1654 has_node = repo.changelog.nodemap.__contains__
1662 has_node = repo.changelog.nodemap.__contains__
1655 nonpublishroots = 0
1663 nonpublishroots = 0
1656 for nhex, phase in remotephases.iteritems():
1664 for nhex, phase in remotephases.iteritems():
1657 if nhex == b'publishing': # ignore data related to publish option
1665 if nhex == b'publishing': # ignore data related to publish option
1658 continue
1666 continue
1659 node = bin(nhex)
1667 node = bin(nhex)
1660 if has_node(node) and int(phase):
1668 if has_node(node) and int(phase):
1661 nonpublishroots += 1
1669 nonpublishroots += 1
1662 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1670 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1663 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1671 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1664
1672
1665 def d():
1673 def d():
1666 phases.remotephasessummary(repo, remotesubset, remotephases)
1674 phases.remotephasessummary(repo, remotesubset, remotephases)
1667
1675
1668 timer(d)
1676 timer(d)
1669 fm.end()
1677 fm.end()
1670
1678
1671
1679
1672 @command(
1680 @command(
1673 b'perf::manifest|perfmanifest',
1681 b'perf::manifest|perfmanifest',
1674 [
1682 [
1675 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1683 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1676 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1684 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1677 ]
1685 ]
1678 + formatteropts,
1686 + formatteropts,
1679 b'REV|NODE',
1687 b'REV|NODE',
1680 )
1688 )
1681 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1689 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1682 """benchmark the time to read a manifest from disk and return a usable
1690 """benchmark the time to read a manifest from disk and return a usable
1683 dict-like object
1691 dict-like object
1684
1692
1685 Manifest caches are cleared before retrieval."""
1693 Manifest caches are cleared before retrieval."""
1686 opts = _byteskwargs(opts)
1694 opts = _byteskwargs(opts)
1687 timer, fm = gettimer(ui, opts)
1695 timer, fm = gettimer(ui, opts)
1688 if not manifest_rev:
1696 if not manifest_rev:
1689 ctx = scmutil.revsingle(repo, rev, rev)
1697 ctx = scmutil.revsingle(repo, rev, rev)
1690 t = ctx.manifestnode()
1698 t = ctx.manifestnode()
1691 else:
1699 else:
1692 from mercurial.node import bin
1700 from mercurial.node import bin
1693
1701
1694 if len(rev) == 40:
1702 if len(rev) == 40:
1695 t = bin(rev)
1703 t = bin(rev)
1696 else:
1704 else:
1697 try:
1705 try:
1698 rev = int(rev)
1706 rev = int(rev)
1699
1707
1700 if util.safehasattr(repo.manifestlog, b'getstorage'):
1708 if util.safehasattr(repo.manifestlog, b'getstorage'):
1701 t = repo.manifestlog.getstorage(b'').node(rev)
1709 t = repo.manifestlog.getstorage(b'').node(rev)
1702 else:
1710 else:
1703 t = repo.manifestlog._revlog.lookup(rev)
1711 t = repo.manifestlog._revlog.lookup(rev)
1704 except ValueError:
1712 except ValueError:
1705 raise error.Abort(
1713 raise error.Abort(
1706 b'manifest revision must be integer or full node'
1714 b'manifest revision must be integer or full node'
1707 )
1715 )
1708
1716
1709 def d():
1717 def d():
1710 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1718 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1711 repo.manifestlog[t].read()
1719 repo.manifestlog[t].read()
1712
1720
1713 timer(d)
1721 timer(d)
1714 fm.end()
1722 fm.end()
1715
1723
1716
1724
1717 @command(b'perf::changeset|perfchangeset', formatteropts)
1725 @command(b'perf::changeset|perfchangeset', formatteropts)
1718 def perfchangeset(ui, repo, rev, **opts):
1726 def perfchangeset(ui, repo, rev, **opts):
1719 opts = _byteskwargs(opts)
1727 opts = _byteskwargs(opts)
1720 timer, fm = gettimer(ui, opts)
1728 timer, fm = gettimer(ui, opts)
1721 n = scmutil.revsingle(repo, rev).node()
1729 n = scmutil.revsingle(repo, rev).node()
1722
1730
1723 def d():
1731 def d():
1724 repo.changelog.read(n)
1732 repo.changelog.read(n)
1725 # repo.changelog._cache = None
1733 # repo.changelog._cache = None
1726
1734
1727 timer(d)
1735 timer(d)
1728 fm.end()
1736 fm.end()
1729
1737
1730
1738
1731 @command(b'perf::ignore|perfignore', formatteropts)
1739 @command(b'perf::ignore|perfignore', formatteropts)
1732 def perfignore(ui, repo, **opts):
1740 def perfignore(ui, repo, **opts):
1733 """benchmark operation related to computing ignore"""
1741 """benchmark operation related to computing ignore"""
1734 opts = _byteskwargs(opts)
1742 opts = _byteskwargs(opts)
1735 timer, fm = gettimer(ui, opts)
1743 timer, fm = gettimer(ui, opts)
1736 dirstate = repo.dirstate
1744 dirstate = repo.dirstate
1737
1745
1738 def setupone():
1746 def setupone():
1739 dirstate.invalidate()
1747 dirstate.invalidate()
1740 clearfilecache(dirstate, b'_ignore')
1748 clearfilecache(dirstate, b'_ignore')
1741
1749
1742 def runone():
1750 def runone():
1743 dirstate._ignore
1751 dirstate._ignore
1744
1752
1745 timer(runone, setup=setupone, title=b"load")
1753 timer(runone, setup=setupone, title=b"load")
1746 fm.end()
1754 fm.end()
1747
1755
1748
1756
1749 @command(
1757 @command(
1750 b'perf::index|perfindex',
1758 b'perf::index|perfindex',
1751 [
1759 [
1752 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1760 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1753 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1761 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1754 ]
1762 ]
1755 + formatteropts,
1763 + formatteropts,
1756 )
1764 )
1757 def perfindex(ui, repo, **opts):
1765 def perfindex(ui, repo, **opts):
1758 """benchmark index creation time followed by a lookup
1766 """benchmark index creation time followed by a lookup
1759
1767
1760 The default is to look `tip` up. Depending on the index implementation,
1768 The default is to look `tip` up. Depending on the index implementation,
1761 the revision looked up can matter. For example, an implementation
1769 the revision looked up can matter. For example, an implementation
1762 scanning the index will have a faster lookup time for `--rev tip` than for
1770 scanning the index will have a faster lookup time for `--rev tip` than for
1763 `--rev 0`. The number of looked up revisions and their order can also
1771 `--rev 0`. The number of looked up revisions and their order can also
1764 matter.
1772 matter.
1765
1773
1766 Examples of useful sets to test:
1774 Examples of useful sets to test:
1767
1775
1768 * tip
1776 * tip
1769 * 0
1777 * 0
1770 * -10:
1778 * -10:
1771 * :10
1779 * :10
1772 * -10: + :10
1780 * -10: + :10
1773 * :10: + -10:
1781 * :10: + -10:
1774 * -10000:
1782 * -10000:
1775 * -10000: + 0
1783 * -10000: + 0
1776
1784
1777 It is not currently possible to check for lookup of a missing node. For
1785 It is not currently possible to check for lookup of a missing node. For
1778 deeper lookup benchmarking, check out the `perfnodemap` command."""
1786 deeper lookup benchmarking, check out the `perfnodemap` command."""
1779 import mercurial.revlog
1787 import mercurial.revlog
1780
1788
1781 opts = _byteskwargs(opts)
1789 opts = _byteskwargs(opts)
1782 timer, fm = gettimer(ui, opts)
1790 timer, fm = gettimer(ui, opts)
1783 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1791 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1784 if opts[b'no_lookup']:
1792 if opts[b'no_lookup']:
1785 if opts['rev']:
1793 if opts['rev']:
1786 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1794 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1787 nodes = []
1795 nodes = []
1788 elif not opts[b'rev']:
1796 elif not opts[b'rev']:
1789 nodes = [repo[b"tip"].node()]
1797 nodes = [repo[b"tip"].node()]
1790 else:
1798 else:
1791 revs = scmutil.revrange(repo, opts[b'rev'])
1799 revs = scmutil.revrange(repo, opts[b'rev'])
1792 cl = repo.changelog
1800 cl = repo.changelog
1793 nodes = [cl.node(r) for r in revs]
1801 nodes = [cl.node(r) for r in revs]
1794
1802
1795 unfi = repo.unfiltered()
1803 unfi = repo.unfiltered()
1796 # find the filecache func directly
1804 # find the filecache func directly
1797 # This avoids polluting the benchmark with the filecache logic
1805 # This avoids polluting the benchmark with the filecache logic
1798 makecl = unfi.__class__.changelog.func
1806 makecl = unfi.__class__.changelog.func
1799
1807
1800 def setup():
1808 def setup():
1801 # probably not necessary, but for good measure
1809 # probably not necessary, but for good measure
1802 clearchangelog(unfi)
1810 clearchangelog(unfi)
1803
1811
1804 def d():
1812 def d():
1805 cl = makecl(unfi)
1813 cl = makecl(unfi)
1806 for n in nodes:
1814 for n in nodes:
1807 cl.rev(n)
1815 cl.rev(n)
1808
1816
1809 timer(d, setup=setup)
1817 timer(d, setup=setup)
1810 fm.end()
1818 fm.end()
1811
1819
1812
1820
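# Example invocations of perf::index exercising some of the revsets suggested
# in the docstring above (a sketch; the revsets are illustrative):
#
#   $ hg perf::index --rev tip
#   $ hg perf::index --rev '-10000:' --rev 0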
1813 @command(
1821 @command(
1814 b'perf::nodemap|perfnodemap',
1822 b'perf::nodemap|perfnodemap',
1815 [
1823 [
1816 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1824 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1817 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1825 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1818 ]
1826 ]
1819 + formatteropts,
1827 + formatteropts,
1820 )
1828 )
1821 def perfnodemap(ui, repo, **opts):
1829 def perfnodemap(ui, repo, **opts):
1822 """benchmark the time necessary to look up revision from a cold nodemap
1830 """benchmark the time necessary to look up revision from a cold nodemap
1823
1831
1824 Depending on the implementation, the amount and order of revisions we look
1832 Depending on the implementation, the amount and order of revisions we look
1825 up can vary. Examples of useful sets to test:
1833 up can vary. Examples of useful sets to test:
1826 * tip
1834 * tip
1827 * 0
1835 * 0
1828 * -10:
1836 * -10:
1829 * :10
1837 * :10
1830 * -10: + :10
1838 * -10: + :10
1831 * :10: + -10:
1839 * :10: + -10:
1832 * -10000:
1840 * -10000:
1833 * -10000: + 0
1841 * -10000: + 0
1834
1842
1835 The command currently focuses on valid binary lookup. Benchmarking for
1843 The command currently focuses on valid binary lookup. Benchmarking for
1836 hexlookup, prefix lookup and missing lookup would also be valuable.
1844 hexlookup, prefix lookup and missing lookup would also be valuable.
1837 """
1845 """
1838 import mercurial.revlog
1846 import mercurial.revlog
1839
1847
1840 opts = _byteskwargs(opts)
1848 opts = _byteskwargs(opts)
1841 timer, fm = gettimer(ui, opts)
1849 timer, fm = gettimer(ui, opts)
1842 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1850 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1843
1851
1844 unfi = repo.unfiltered()
1852 unfi = repo.unfiltered()
1845 clearcaches = opts[b'clear_caches']
1853 clearcaches = opts[b'clear_caches']
1846 # find the filecache func directly
1854 # find the filecache func directly
1847 # This avoids polluting the benchmark with the filecache logic
1855 # This avoids polluting the benchmark with the filecache logic
1848 makecl = unfi.__class__.changelog.func
1856 makecl = unfi.__class__.changelog.func
1849 if not opts[b'rev']:
1857 if not opts[b'rev']:
1850 raise error.Abort(b'use --rev to specify revisions to look up')
1858 raise error.Abort(b'use --rev to specify revisions to look up')
1851 revs = scmutil.revrange(repo, opts[b'rev'])
1859 revs = scmutil.revrange(repo, opts[b'rev'])
1852 cl = repo.changelog
1860 cl = repo.changelog
1853 nodes = [cl.node(r) for r in revs]
1861 nodes = [cl.node(r) for r in revs]
1854
1862
1855 # use a list to pass reference to a nodemap from one closure to the next
1863 # use a list to pass reference to a nodemap from one closure to the next
1856 nodeget = [None]
1864 nodeget = [None]
1857
1865
1858 def setnodeget():
1866 def setnodeget():
1859 # probably not necessary, but for good measure
1867 # probably not necessary, but for good measure
1860 clearchangelog(unfi)
1868 clearchangelog(unfi)
1861 cl = makecl(unfi)
1869 cl = makecl(unfi)
1862 if util.safehasattr(cl.index, 'get_rev'):
1870 if util.safehasattr(cl.index, 'get_rev'):
1863 nodeget[0] = cl.index.get_rev
1871 nodeget[0] = cl.index.get_rev
1864 else:
1872 else:
1865 nodeget[0] = cl.nodemap.get
1873 nodeget[0] = cl.nodemap.get
1866
1874
1867 def d():
1875 def d():
1868 get = nodeget[0]
1876 get = nodeget[0]
1869 for n in nodes:
1877 for n in nodes:
1870 get(n)
1878 get(n)
1871
1879
1872 setup = None
1880 setup = None
1873 if clearcaches:
1881 if clearcaches:
1874
1882
1875 def setup():
1883 def setup():
1876 setnodeget()
1884 setnodeget()
1877
1885
1878 else:
1886 else:
1879 setnodeget()
1887 setnodeget()
1880 d() # prewarm the data structure
1888 d() # prewarm the data structure
1881 timer(d, setup=setup)
1889 timer(d, setup=setup)
1882 fm.end()
1890 fm.end()
1883
1891
1884
1892
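# Example invocation of perf::nodemap (a sketch; unlike perf::index, `--rev`
# is mandatory here and the revset is illustrative):
#
#   $ hg perf::nodemap --rev '-10000:' --rev 0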
1885 @command(b'perf::startup|perfstartup', formatteropts)
1893 @command(b'perf::startup|perfstartup', formatteropts)
1886 def perfstartup(ui, repo, **opts):
1894 def perfstartup(ui, repo, **opts):
1887 opts = _byteskwargs(opts)
1895 opts = _byteskwargs(opts)
1888 timer, fm = gettimer(ui, opts)
1896 timer, fm = gettimer(ui, opts)
1889
1897
1890 def d():
1898 def d():
1891 if os.name != 'nt':
1899 if os.name != 'nt':
1892 os.system(
1900 os.system(
1893 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1901 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1894 )
1902 )
1895 else:
1903 else:
1896 os.environ['HGRCPATH'] = r' '
1904 os.environ['HGRCPATH'] = r' '
1897 os.system("%s version -q > NUL" % sys.argv[0])
1905 os.system("%s version -q > NUL" % sys.argv[0])
1898
1906
1899 timer(d)
1907 timer(d)
1900 fm.end()
1908 fm.end()
1901
1909
1902
1910
1903 def _find_stream_generator(version):
1911 def _find_stream_generator(version):
1904 """find the proper generator function for this stream version"""
1912 """find the proper generator function for this stream version"""
1905 import mercurial.streamclone
1913 import mercurial.streamclone
1906
1914
1907 available = {}
1915 available = {}
1908
1916
1909 # try to fetch a v1 generator
1917 # try to fetch a v1 generator
1910 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
1918 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
1911 if generatev1 is not None:
1919 if generatev1 is not None:
1912
1920
1913 def generate(repo):
1921 def generate(repo):
1914 entries, bytes, data = generatev1(repo)
1922 entries, bytes, data = generatev1(repo)
1915 return data
1923 return data
1916
1924
1917 available[b'v1'] = generate
1925 available[b'v1'] = generate
1918 # try to fetch a v2 generator
1926 # try to fetch a v2 generator
1919 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
1927 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
1920 if generatev2 is not None:
1928 if generatev2 is not None:
1921
1929
1922 def generate(repo):
1930 def generate(repo):
1923 entries, bytes, data = generatev2(repo, None, None, True)
1931 entries, bytes, data = generatev2(repo, None, None, True)
1924 return data
1932 return data
1925
1933
1926 available[b'v2'] = generate
1934 available[b'v2'] = generate
1927 # try to fetch a v3 generator
1935 # try to fetch a v3 generator
1928 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
1936 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
1929 if generatev3 is not None:
1937 if generatev3 is not None:
1930
1938
1931 def generate(repo):
1939 def generate(repo):
1932 entries, bytes, data = generatev3(repo, None, None, True)
1940 entries, bytes, data = generatev3(repo, None, None, True)
1933 return data
1941 return data
1934
1942
1935 available[b'v3-exp'] = generate
1943 available[b'v3-exp'] = generate
1936
1944
1937 # resolve the request
1945 # resolve the request
1938 if version == b"latest":
1946 if version == b"latest":
1939 # latest is the highest non-experimental version
1947 # latest is the highest non-experimental version
1940 latest_key = max(v for v in available if b'-exp' not in v)
1948 latest_key = max(v for v in available if b'-exp' not in v)
1941 return available[latest_key]
1949 return available[latest_key]
1942 elif version in available:
1950 elif version in available:
1943 return available[version]
1951 return available[version]
1944 else:
1952 else:
1945 msg = b"unkown or unavailable version: %s"
1953 msg = b"unkown or unavailable version: %s"
1946 msg %= version
1954 msg %= version
1947 hint = b"available versions: %s"
1955 hint = b"available versions: %s"
1948 hint %= b', '.join(sorted(available))
1956 hint %= b', '.join(sorted(available))
1949 raise error.Abort(msg, hint=hint)
1957 raise error.Abort(msg, hint=hint)
1950
1958
1951
1959
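# Worked example for the resolution above (a sketch, assuming a Mercurial
# recent enough to expose generatev1, generatev2 and generatev3): `available`
# would hold the keys b'v1', b'v2' and b'v3-exp', so requesting b"latest"
# picks b'v2', the highest key without an '-exp' suffix.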
1952 @command(
1960 @command(
1953 b'perf::stream-locked-section',
1961 b'perf::stream-locked-section',
1954 [
1962 [
1955 (
1963 (
1956 b'',
1964 b'',
1957 b'stream-version',
1965 b'stream-version',
1958 b'latest',
1966 b'latest',
1958 b'stream version to use ("v1", "v2" or "latest" (the default))',
1966 b'stream version to use ("v1", "v2" or "latest" (the default))',
1960 ),
1968 ),
1961 ]
1969 ]
1962 + formatteropts,
1970 + formatteropts,
1963 )
1971 )
1964 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
1972 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
1965 """benchmark the initial, repo-locked, section of a stream-clone"""
1973 """benchmark the initial, repo-locked, section of a stream-clone"""
1966
1974
1967 opts = _byteskwargs(opts)
1975 opts = _byteskwargs(opts)
1968 timer, fm = gettimer(ui, opts)
1976 timer, fm = gettimer(ui, opts)
1969
1977
1970 # deletion of the generator may trigger some cleanup that we do not want to
1978 # deletion of the generator may trigger some cleanup that we do not want to
1971 # measure
1979 # measure
1972 result_holder = [None]
1980 result_holder = [None]
1973
1981
1974 def setupone():
1982 def setupone():
1975 result_holder[0] = None
1983 result_holder[0] = None
1976
1984
1977 generate = _find_stream_generator(stream_version)
1985 generate = _find_stream_generator(stream_version)
1978
1986
1979 def runone():
1987 def runone():
1980 # the lock is held for the duration of the initialisation
1988 # the lock is held for the duration of the initialisation
1981 result_holder[0] = generate(repo)
1989 result_holder[0] = generate(repo)
1982
1990
1983 timer(runone, setup=setupone, title=b"load")
1991 timer(runone, setup=setupone, title=b"load")
1984 fm.end()
1992 fm.end()
1985
1993
1986
1994
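# Example invocation of perf::stream-locked-section (a sketch; the stream
# version is resolved through _find_stream_generator above):
#
#   $ hg perf::stream-locked-section --stream-version v2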
1987 @command(b'perf::parents|perfparents', formatteropts)
1995 @command(b'perf::parents|perfparents', formatteropts)
1988 def perfparents(ui, repo, **opts):
1996 def perfparents(ui, repo, **opts):
1989 """benchmark the time necessary to fetch one changeset's parents.
1997 """benchmark the time necessary to fetch one changeset's parents.
1990
1998
1991 The fetch is done using the `node identifier`, traversing all object layers
1999 The fetch is done using the `node identifier`, traversing all object layers
1992 from the repository object. The first N revisions will be used for this
2000 from the repository object. The first N revisions will be used for this
1993 benchmark. N is controlled by the ``perf.parentscount`` config option
2001 benchmark. N is controlled by the ``perf.parentscount`` config option
1994 (default: 1000).
2002 (default: 1000).
1995 """
2003 """
1996 opts = _byteskwargs(opts)
2004 opts = _byteskwargs(opts)
1997 timer, fm = gettimer(ui, opts)
2005 timer, fm = gettimer(ui, opts)
1998 # control the number of commits perfparents iterates over
2006 # control the number of commits perfparents iterates over
1999 # experimental config: perf.parentscount
2007 # experimental config: perf.parentscount
2000 count = getint(ui, b"perf", b"parentscount", 1000)
2008 count = getint(ui, b"perf", b"parentscount", 1000)
2001 if len(repo.changelog) < count:
2009 if len(repo.changelog) < count:
2002 raise error.Abort(b"repo needs %d commits for this test" % count)
2010 raise error.Abort(b"repo needs %d commits for this test" % count)
2003 repo = repo.unfiltered()
2011 repo = repo.unfiltered()
2004 nl = [repo.changelog.node(i) for i in _xrange(count)]
2012 nl = [repo.changelog.node(i) for i in _xrange(count)]
2005
2013
2006 def d():
2014 def d():
2007 for n in nl:
2015 for n in nl:
2008 repo.changelog.parents(n)
2016 repo.changelog.parents(n)
2009
2017
2010 timer(d)
2018 timer(d)
2011 fm.end()
2019 fm.end()
2012
2020
2013
2021
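# Example invocation of perf::parents (a sketch; the config override uses the
# experimental `perf.parentscount` knob mentioned in the docstring, and the
# value is illustrative):
#
#   $ hg perf::parents --config perf.parentscount=100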
2014 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2022 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2015 def perfctxfiles(ui, repo, x, **opts):
2023 def perfctxfiles(ui, repo, x, **opts):
2016 opts = _byteskwargs(opts)
2024 opts = _byteskwargs(opts)
2017 x = int(x)
2025 x = int(x)
2018 timer, fm = gettimer(ui, opts)
2026 timer, fm = gettimer(ui, opts)
2019
2027
2020 def d():
2028 def d():
2021 len(repo[x].files())
2029 len(repo[x].files())
2022
2030
2023 timer(d)
2031 timer(d)
2024 fm.end()
2032 fm.end()
2025
2033
2026
2034
2027 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2035 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2028 def perfrawfiles(ui, repo, x, **opts):
2036 def perfrawfiles(ui, repo, x, **opts):
2029 opts = _byteskwargs(opts)
2037 opts = _byteskwargs(opts)
2030 x = int(x)
2038 x = int(x)
2031 timer, fm = gettimer(ui, opts)
2039 timer, fm = gettimer(ui, opts)
2032 cl = repo.changelog
2040 cl = repo.changelog
2033
2041
2034 def d():
2042 def d():
2035 len(cl.read(x)[3])
2043 len(cl.read(x)[3])
2036
2044
2037 timer(d)
2045 timer(d)
2038 fm.end()
2046 fm.end()
2039
2047
2040
2048
2041 @command(b'perf::lookup|perflookup', formatteropts)
2049 @command(b'perf::lookup|perflookup', formatteropts)
2042 def perflookup(ui, repo, rev, **opts):
2050 def perflookup(ui, repo, rev, **opts):
2043 opts = _byteskwargs(opts)
2051 opts = _byteskwargs(opts)
2044 timer, fm = gettimer(ui, opts)
2052 timer, fm = gettimer(ui, opts)
2045 timer(lambda: len(repo.lookup(rev)))
2053 timer(lambda: len(repo.lookup(rev)))
2046 fm.end()
2054 fm.end()
2047
2055
2048
2056
2049 @command(
2057 @command(
2050 b'perf::linelogedits|perflinelogedits',
2058 b'perf::linelogedits|perflinelogedits',
2051 [
2059 [
2052 (b'n', b'edits', 10000, b'number of edits'),
2060 (b'n', b'edits', 10000, b'number of edits'),
2053 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2061 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2054 ],
2062 ],
2055 norepo=True,
2063 norepo=True,
2056 )
2064 )
2057 def perflinelogedits(ui, **opts):
2065 def perflinelogedits(ui, **opts):
2058 from mercurial import linelog
2066 from mercurial import linelog
2059
2067
2060 opts = _byteskwargs(opts)
2068 opts = _byteskwargs(opts)
2061
2069
2062 edits = opts[b'edits']
2070 edits = opts[b'edits']
2063 maxhunklines = opts[b'max_hunk_lines']
2071 maxhunklines = opts[b'max_hunk_lines']
2064
2072
2065 maxb1 = 100000
2073 maxb1 = 100000
2066 random.seed(0)
2074 random.seed(0)
2067 randint = random.randint
2075 randint = random.randint
2068 currentlines = 0
2076 currentlines = 0
2069 arglist = []
2077 arglist = []
2070 for rev in _xrange(edits):
2078 for rev in _xrange(edits):
2071 a1 = randint(0, currentlines)
2079 a1 = randint(0, currentlines)
2072 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2080 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2073 b1 = randint(0, maxb1)
2081 b1 = randint(0, maxb1)
2074 b2 = randint(b1, b1 + maxhunklines)
2082 b2 = randint(b1, b1 + maxhunklines)
2075 currentlines += (b2 - b1) - (a2 - a1)
2083 currentlines += (b2 - b1) - (a2 - a1)
2076 arglist.append((rev, a1, a2, b1, b2))
2084 arglist.append((rev, a1, a2, b1, b2))
2077
2085
2078 def d():
2086 def d():
2079 ll = linelog.linelog()
2087 ll = linelog.linelog()
2080 for args in arglist:
2088 for args in arglist:
2081 ll.replacelines(*args)
2089 ll.replacelines(*args)
2082
2090
2083 timer, fm = gettimer(ui, opts)
2091 timer, fm = gettimer(ui, opts)
2084 timer(d)
2092 timer(d)
2085 fm.end()
2093 fm.end()
2086
2094
2087
2095
2088 @command(b'perf::revrange|perfrevrange', formatteropts)
2096 @command(b'perf::revrange|perfrevrange', formatteropts)
2089 def perfrevrange(ui, repo, *specs, **opts):
2097 def perfrevrange(ui, repo, *specs, **opts):
2090 opts = _byteskwargs(opts)
2098 opts = _byteskwargs(opts)
2091 timer, fm = gettimer(ui, opts)
2099 timer, fm = gettimer(ui, opts)
2092 revrange = scmutil.revrange
2100 revrange = scmutil.revrange
2093 timer(lambda: len(revrange(repo, specs)))
2101 timer(lambda: len(revrange(repo, specs)))
2094 fm.end()
2102 fm.end()
2095
2103
2096
2104
2097 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2105 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2098 def perfnodelookup(ui, repo, rev, **opts):
2106 def perfnodelookup(ui, repo, rev, **opts):
2099 opts = _byteskwargs(opts)
2107 opts = _byteskwargs(opts)
2100 timer, fm = gettimer(ui, opts)
2108 timer, fm = gettimer(ui, opts)
2101 import mercurial.revlog
2109 import mercurial.revlog
2102
2110
2103 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2111 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2104 n = scmutil.revsingle(repo, rev).node()
2112 n = scmutil.revsingle(repo, rev).node()
2105
2113
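# The revlog constructor takes a "radix" argument on recent Mercurial and an
# "indexfile" argument on hg <= 5.8; try the modern spelling first and fall
# back on TypeError for older versions.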
2106 try:
2114 try:
2107 cl = revlog(getsvfs(repo), radix=b"00changelog")
2115 cl = revlog(getsvfs(repo), radix=b"00changelog")
2108 except TypeError:
2116 except TypeError:
2109 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2117 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2110
2118
2111 def d():
2119 def d():
2112 cl.rev(n)
2120 cl.rev(n)
2113 clearcaches(cl)
2121 clearcaches(cl)
2114
2122
2115 timer(d)
2123 timer(d)
2116 fm.end()
2124 fm.end()
2117
2125
2118
2126
2119 @command(
2127 @command(
2120 b'perf::log|perflog',
2128 b'perf::log|perflog',
2121 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2129 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2122 )
2130 )
2123 def perflog(ui, repo, rev=None, **opts):
2131 def perflog(ui, repo, rev=None, **opts):
2124 opts = _byteskwargs(opts)
2132 opts = _byteskwargs(opts)
2125 if rev is None:
2133 if rev is None:
2126 rev = []
2134 rev = []
2127 timer, fm = gettimer(ui, opts)
2135 timer, fm = gettimer(ui, opts)
2128 ui.pushbuffer()
2136 ui.pushbuffer()
2129 timer(
2137 timer(
2130 lambda: commands.log(
2138 lambda: commands.log(
2131 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2139 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2132 )
2140 )
2133 )
2141 )
2134 ui.popbuffer()
2142 ui.popbuffer()
2135 fm.end()
2143 fm.end()
2136
2144
2137
2145
2138 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2146 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2139 def perfmoonwalk(ui, repo, **opts):
2147 def perfmoonwalk(ui, repo, **opts):
2140 """benchmark walking the changelog backwards
2148 """benchmark walking the changelog backwards
2141
2149
2142 This also loads the changelog data for each revision in the changelog.
2150 This also loads the changelog data for each revision in the changelog.
2143 """
2151 """
2144 opts = _byteskwargs(opts)
2152 opts = _byteskwargs(opts)
2145 timer, fm = gettimer(ui, opts)
2153 timer, fm = gettimer(ui, opts)
2146
2154
2147 def moonwalk():
2155 def moonwalk():
2148 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2156 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2149 ctx = repo[i]
2157 ctx = repo[i]
2150 ctx.branch() # read changelog data (in addition to the index)
2158 ctx.branch() # read changelog data (in addition to the index)
2151
2159
2152 timer(moonwalk)
2160 timer(moonwalk)
2153 fm.end()
2161 fm.end()
2154
2162
2155
2163
2156 @command(
2164 @command(
2157 b'perf::templating|perftemplating',
2165 b'perf::templating|perftemplating',
2158 [
2166 [
2159 (b'r', b'rev', [], b'revisions to run the template on'),
2167 (b'r', b'rev', [], b'revisions to run the template on'),
2160 ]
2168 ]
2161 + formatteropts,
2169 + formatteropts,
2162 )
2170 )
2163 def perftemplating(ui, repo, testedtemplate=None, **opts):
2171 def perftemplating(ui, repo, testedtemplate=None, **opts):
2164 """test the rendering time of a given template"""
2172 """test the rendering time of a given template"""
2165 if makelogtemplater is None:
2173 if makelogtemplater is None:
2166 raise error.Abort(
2174 raise error.Abort(
2167 b"perftemplating not available with this Mercurial",
2175 b"perftemplating not available with this Mercurial",
2168 hint=b"use 4.3 or later",
2176 hint=b"use 4.3 or later",
2169 )
2177 )
2170
2178
2171 opts = _byteskwargs(opts)
2179 opts = _byteskwargs(opts)
2172
2180
2173 nullui = ui.copy()
2181 nullui = ui.copy()
2174 nullui.fout = open(os.devnull, 'wb')
2182 nullui.fout = open(os.devnull, 'wb')
2175 nullui.disablepager()
2183 nullui.disablepager()
2176 revs = opts.get(b'rev')
2184 revs = opts.get(b'rev')
2177 if not revs:
2185 if not revs:
2178 revs = [b'all()']
2186 revs = [b'all()']
2179 revs = list(scmutil.revrange(repo, revs))
2187 revs = list(scmutil.revrange(repo, revs))
2180
2188
2181 defaulttemplate = (
2189 defaulttemplate = (
2182 b'{date|shortdate} [{rev}:{node|short}]'
2190 b'{date|shortdate} [{rev}:{node|short}]'
2183 b' {author|person}: {desc|firstline}\n'
2191 b' {author|person}: {desc|firstline}\n'
2184 )
2192 )
2185 if testedtemplate is None:
2193 if testedtemplate is None:
2186 testedtemplate = defaulttemplate
2194 testedtemplate = defaulttemplate
2187 displayer = makelogtemplater(nullui, repo, testedtemplate)
2195 displayer = makelogtemplater(nullui, repo, testedtemplate)
2188
2196
2189 def format():
2197 def format():
2190 for r in revs:
2198 for r in revs:
2191 ctx = repo[r]
2199 ctx = repo[r]
2192 displayer.show(ctx)
2200 displayer.show(ctx)
2193 displayer.flush(ctx)
2201 displayer.flush(ctx)
2194
2202
2195 timer, fm = gettimer(ui, opts)
2203 timer, fm = gettimer(ui, opts)
2196 timer(format)
2204 timer(format)
2197 fm.end()
2205 fm.end()
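# Example invocations (the revset and template below are only illustrative):
#
#     hg perf::templating -r '0:1000'
#     hg perf::templating -r 'tip' '{rev} {desc|firstline}\n'
#
# When no template argument is given, the default template defined above is
# rendered for every revision in the requested set.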
2198
2206
2199
2207
2200 def _displaystats(ui, opts, entries, data):
2208 def _displaystats(ui, opts, entries, data):
2201 # use a second formatter because the data are quite different, not sure
2209 # use a second formatter because the data are quite different, not sure
2202 # how it flies with the templater.
2210 # how it flies with the templater.
2203 fm = ui.formatter(b'perf-stats', opts)
2211 fm = ui.formatter(b'perf-stats', opts)
2204 for key, title in entries:
2212 for key, title in entries:
2205 values = data[key]
2213 values = data[key]
2206 nbvalues = len(values)
2214 nbvalues = len(values)
2207 values.sort()
2215 values.sort()
2208 stats = {
2216 stats = {
2209 'key': key,
2217 'key': key,
2210 'title': title,
2218 'title': title,
2211 'nbitems': len(values),
2219 'nbitems': len(values),
2212 'min': values[0][0],
2220 'min': values[0][0],
2213 '10%': values[(nbvalues * 10) // 100][0],
2221 '10%': values[(nbvalues * 10) // 100][0],
2214 '25%': values[(nbvalues * 25) // 100][0],
2222 '25%': values[(nbvalues * 25) // 100][0],
2215 '50%': values[(nbvalues * 50) // 100][0],
2223 '50%': values[(nbvalues * 50) // 100][0],
2216 '75%': values[(nbvalues * 75) // 100][0],
2224 '75%': values[(nbvalues * 75) // 100][0],
2217 '80%': values[(nbvalues * 80) // 100][0],
2225 '80%': values[(nbvalues * 80) // 100][0],
2218 '85%': values[(nbvalues * 85) // 100][0],
2226 '85%': values[(nbvalues * 85) // 100][0],
2219 '90%': values[(nbvalues * 90) // 100][0],
2227 '90%': values[(nbvalues * 90) // 100][0],
2220 '95%': values[(nbvalues * 95) // 100][0],
2228 '95%': values[(nbvalues * 95) // 100][0],
2221 '99%': values[(nbvalues * 99) // 100][0],
2229 '99%': values[(nbvalues * 99) // 100][0],
2222 'max': values[-1][0],
2230 'max': values[-1][0],
2223 }
2231 }
2224 fm.startitem()
2232 fm.startitem()
2225 fm.data(**stats)
2233 fm.data(**stats)
2226 # also print a human-readable version of the stats
2234 # also print a human-readable version of the stats
2227 fm.plain('### %s (%d items)\n' % (title, len(values)))
2235 fm.plain('### %s (%d items)\n' % (title, len(values)))
2228 lines = [
2236 lines = [
2229 'min',
2237 'min',
2230 '10%',
2238 '10%',
2231 '25%',
2239 '25%',
2232 '50%',
2240 '50%',
2233 '75%',
2241 '75%',
2234 '80%',
2242 '80%',
2235 '85%',
2243 '85%',
2236 '90%',
2244 '90%',
2237 '95%',
2245 '95%',
2238 '99%',
2246 '99%',
2239 'max',
2247 'max',
2240 ]
2248 ]
2241 for l in lines:
2249 for l in lines:
2242 fm.plain('%s: %s\n' % (l, stats[l]))
2250 fm.plain('%s: %s\n' % (l, stats[l]))
2243 fm.end()
2251 fm.end()
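# _displaystats reads each entry of `data` as a list of (value, ...) tuples,
# sorts it, and indexes the sorted list to approximate percentiles. For
# instance, with 200 collected values the reported 90% figure is
# values[(200 * 90) // 100][0], i.e. the value at sorted position 180.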
2244
2252
2245
2253
2246 @command(
2254 @command(
2247 b'perf::helper-mergecopies|perfhelper-mergecopies',
2255 b'perf::helper-mergecopies|perfhelper-mergecopies',
2248 formatteropts
2256 formatteropts
2249 + [
2257 + [
2250 (b'r', b'revs', [], b'restrict search to these revisions'),
2258 (b'r', b'revs', [], b'restrict search to these revisions'),
2251 (b'', b'timing', False, b'provides extra data (costly)'),
2259 (b'', b'timing', False, b'provides extra data (costly)'),
2252 (b'', b'stats', False, b'provides statistic about the measured data'),
2260 (b'', b'stats', False, b'provides statistic about the measured data'),
2253 ],
2261 ],
2254 )
2262 )
2255 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2263 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2256 """find statistics about potential parameters for `perfmergecopies`
2264 """find statistics about potential parameters for `perfmergecopies`
2257
2265
2258 This command finds (base, p1, p2) triplets relevant for copy tracing
2266 This command finds (base, p1, p2) triplets relevant for copy tracing
2259 benchmarking in the context of a merge. It reports values for some of the
2267 benchmarking in the context of a merge. It reports values for some of the
2260 parameters that impact merge copy tracing time during merge.
2268 parameters that impact merge copy tracing time during merge.
2261
2269
2262 If `--timing` is set, rename detection is run and the associated timing
2270 If `--timing` is set, rename detection is run and the associated timing
2263 will be reported. The extra details come at the cost of slower command
2271 will be reported. The extra details come at the cost of slower command
2264 execution.
2272 execution.
2265
2273
2266 Since rename detection is only run once, other factors might easily
2274 Since rename detection is only run once, other factors might easily
2267 affect the precision of the timing. However, it should give a good
2275 affect the precision of the timing. However, it should give a good
2268 approximation of which revision triplets are very costly.
2276 approximation of which revision triplets are very costly.
2269 """
2277 """
2270 opts = _byteskwargs(opts)
2278 opts = _byteskwargs(opts)
2271 fm = ui.formatter(b'perf', opts)
2279 fm = ui.formatter(b'perf', opts)
2272 dotiming = opts[b'timing']
2280 dotiming = opts[b'timing']
2273 dostats = opts[b'stats']
2281 dostats = opts[b'stats']
2274
2282
2275 output_template = [
2283 output_template = [
2276 ("base", "%(base)12s"),
2284 ("base", "%(base)12s"),
2277 ("p1", "%(p1.node)12s"),
2285 ("p1", "%(p1.node)12s"),
2278 ("p2", "%(p2.node)12s"),
2286 ("p2", "%(p2.node)12s"),
2279 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2287 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2280 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2288 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2281 ("p1.renames", "%(p1.renamedfiles)12d"),
2289 ("p1.renames", "%(p1.renamedfiles)12d"),
2282 ("p1.time", "%(p1.time)12.3f"),
2290 ("p1.time", "%(p1.time)12.3f"),
2283 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2291 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2284 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2292 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2285 ("p2.renames", "%(p2.renamedfiles)12d"),
2293 ("p2.renames", "%(p2.renamedfiles)12d"),
2286 ("p2.time", "%(p2.time)12.3f"),
2294 ("p2.time", "%(p2.time)12.3f"),
2287 ("renames", "%(nbrenamedfiles)12d"),
2295 ("renames", "%(nbrenamedfiles)12d"),
2288 ("total.time", "%(time)12.3f"),
2296 ("total.time", "%(time)12.3f"),
2289 ]
2297 ]
2290 if not dotiming:
2298 if not dotiming:
2291 output_template = [
2299 output_template = [
2292 i
2300 i
2293 for i in output_template
2301 for i in output_template
2294 if not ('time' in i[0] or 'renames' in i[0])
2302 if not ('time' in i[0] or 'renames' in i[0])
2295 ]
2303 ]
2296 header_names = [h for (h, v) in output_template]
2304 header_names = [h for (h, v) in output_template]
2297 output = ' '.join([v for (h, v) in output_template]) + '\n'
2305 output = ' '.join([v for (h, v) in output_template]) + '\n'
2298 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2306 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2299 fm.plain(header % tuple(header_names))
2307 fm.plain(header % tuple(header_names))
2300
2308
2301 if not revs:
2309 if not revs:
2302 revs = ['all()']
2310 revs = ['all()']
2303 revs = scmutil.revrange(repo, revs)
2311 revs = scmutil.revrange(repo, revs)
2304
2312
2305 if dostats:
2313 if dostats:
2306 alldata = {
2314 alldata = {
2307 'nbrevs': [],
2315 'nbrevs': [],
2308 'nbmissingfiles': [],
2316 'nbmissingfiles': [],
2309 }
2317 }
2310 if dotiming:
2318 if dotiming:
2311 alldata['parentnbrenames'] = []
2319 alldata['parentnbrenames'] = []
2312 alldata['totalnbrenames'] = []
2320 alldata['totalnbrenames'] = []
2313 alldata['parenttime'] = []
2321 alldata['parenttime'] = []
2314 alldata['totaltime'] = []
2322 alldata['totaltime'] = []
2315
2323
2316 roi = repo.revs('merge() and %ld', revs)
2324 roi = repo.revs('merge() and %ld', revs)
2317 for r in roi:
2325 for r in roi:
2318 ctx = repo[r]
2326 ctx = repo[r]
2319 p1 = ctx.p1()
2327 p1 = ctx.p1()
2320 p2 = ctx.p2()
2328 p2 = ctx.p2()
2321 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2329 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2322 for b in bases:
2330 for b in bases:
2323 b = repo[b]
2331 b = repo[b]
2324 p1missing = copies._computeforwardmissing(b, p1)
2332 p1missing = copies._computeforwardmissing(b, p1)
2325 p2missing = copies._computeforwardmissing(b, p2)
2333 p2missing = copies._computeforwardmissing(b, p2)
2326 data = {
2334 data = {
2327 b'base': b.hex(),
2335 b'base': b.hex(),
2328 b'p1.node': p1.hex(),
2336 b'p1.node': p1.hex(),
2329 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2337 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2330 b'p1.nbmissingfiles': len(p1missing),
2338 b'p1.nbmissingfiles': len(p1missing),
2331 b'p2.node': p2.hex(),
2339 b'p2.node': p2.hex(),
2332 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2340 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2333 b'p2.nbmissingfiles': len(p2missing),
2341 b'p2.nbmissingfiles': len(p2missing),
2334 }
2342 }
2335 if dostats:
2343 if dostats:
2336 if p1missing:
2344 if p1missing:
2337 alldata['nbrevs'].append(
2345 alldata['nbrevs'].append(
2338 (data['p1.nbrevs'], b.hex(), p1.hex())
2346 (data['p1.nbrevs'], b.hex(), p1.hex())
2339 )
2347 )
2340 alldata['nbmissingfiles'].append(
2348 alldata['nbmissingfiles'].append(
2341 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2349 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2342 )
2350 )
2343 if p2missing:
2351 if p2missing:
2344 alldata['nbrevs'].append(
2352 alldata['nbrevs'].append(
2345 (data['p2.nbrevs'], b.hex(), p2.hex())
2353 (data['p2.nbrevs'], b.hex(), p2.hex())
2346 )
2354 )
2347 alldata['nbmissingfiles'].append(
2355 alldata['nbmissingfiles'].append(
2348 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2356 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2349 )
2357 )
2350 if dotiming:
2358 if dotiming:
2351 begin = util.timer()
2359 begin = util.timer()
2352 mergedata = copies.mergecopies(repo, p1, p2, b)
2360 mergedata = copies.mergecopies(repo, p1, p2, b)
2353 end = util.timer()
2361 end = util.timer()
2354 # not very stable timing since we did only one run
2362 # not very stable timing since we did only one run
2355 data['time'] = end - begin
2363 data['time'] = end - begin
2356 # mergedata contains five dicts: "copy", "movewithdir",
2364 # mergedata contains five dicts: "copy", "movewithdir",
2357 # "diverge", "renamedelete" and "dirmove".
2365 # "diverge", "renamedelete" and "dirmove".
2358 # The first 4 are about renamed files, so let's count those.
2366 # The first 4 are about renamed files, so let's count those.
2359 renames = len(mergedata[0])
2367 renames = len(mergedata[0])
2360 renames += len(mergedata[1])
2368 renames += len(mergedata[1])
2361 renames += len(mergedata[2])
2369 renames += len(mergedata[2])
2362 renames += len(mergedata[3])
2370 renames += len(mergedata[3])
2363 data['nbrenamedfiles'] = renames
2371 data['nbrenamedfiles'] = renames
2364 begin = util.timer()
2372 begin = util.timer()
2365 p1renames = copies.pathcopies(b, p1)
2373 p1renames = copies.pathcopies(b, p1)
2366 end = util.timer()
2374 end = util.timer()
2367 data['p1.time'] = end - begin
2375 data['p1.time'] = end - begin
2368 begin = util.timer()
2376 begin = util.timer()
2369 p2renames = copies.pathcopies(b, p2)
2377 p2renames = copies.pathcopies(b, p2)
2370 end = util.timer()
2378 end = util.timer()
2371 data['p2.time'] = end - begin
2379 data['p2.time'] = end - begin
2372 data['p1.renamedfiles'] = len(p1renames)
2380 data['p1.renamedfiles'] = len(p1renames)
2373 data['p2.renamedfiles'] = len(p2renames)
2381 data['p2.renamedfiles'] = len(p2renames)
2374
2382
2375 if dostats:
2383 if dostats:
2376 if p1missing:
2384 if p1missing:
2377 alldata['parentnbrenames'].append(
2385 alldata['parentnbrenames'].append(
2378 (data['p1.renamedfiles'], b.hex(), p1.hex())
2386 (data['p1.renamedfiles'], b.hex(), p1.hex())
2379 )
2387 )
2380 alldata['parenttime'].append(
2388 alldata['parenttime'].append(
2381 (data['p1.time'], b.hex(), p1.hex())
2389 (data['p1.time'], b.hex(), p1.hex())
2382 )
2390 )
2383 if p2missing:
2391 if p2missing:
2384 alldata['parentnbrenames'].append(
2392 alldata['parentnbrenames'].append(
2385 (data['p2.renamedfiles'], b.hex(), p2.hex())
2393 (data['p2.renamedfiles'], b.hex(), p2.hex())
2386 )
2394 )
2387 alldata['parenttime'].append(
2395 alldata['parenttime'].append(
2388 (data['p2.time'], b.hex(), p2.hex())
2396 (data['p2.time'], b.hex(), p2.hex())
2389 )
2397 )
2390 if p1missing or p2missing:
2398 if p1missing or p2missing:
2391 alldata['totalnbrenames'].append(
2399 alldata['totalnbrenames'].append(
2392 (
2400 (
2393 data['nbrenamedfiles'],
2401 data['nbrenamedfiles'],
2394 b.hex(),
2402 b.hex(),
2395 p1.hex(),
2403 p1.hex(),
2396 p2.hex(),
2404 p2.hex(),
2397 )
2405 )
2398 )
2406 )
2399 alldata['totaltime'].append(
2407 alldata['totaltime'].append(
2400 (data['time'], b.hex(), p1.hex(), p2.hex())
2408 (data['time'], b.hex(), p1.hex(), p2.hex())
2401 )
2409 )
2402 fm.startitem()
2410 fm.startitem()
2403 fm.data(**data)
2411 fm.data(**data)
2404 # make node pretty for the human output
2412 # make node pretty for the human output
2405 out = data.copy()
2413 out = data.copy()
2406 out['base'] = fm.hexfunc(b.node())
2414 out['base'] = fm.hexfunc(b.node())
2407 out['p1.node'] = fm.hexfunc(p1.node())
2415 out['p1.node'] = fm.hexfunc(p1.node())
2408 out['p2.node'] = fm.hexfunc(p2.node())
2416 out['p2.node'] = fm.hexfunc(p2.node())
2409 fm.plain(output % out)
2417 fm.plain(output % out)
2410
2418
2411 fm.end()
2419 fm.end()
2412 if dostats:
2420 if dostats:
2413 # use a second formatter because the data are quite different, not sure
2421 # use a second formatter because the data are quite different, not sure
2414 # how it flies with the templater.
2422 # how it flies with the templater.
2415 entries = [
2423 entries = [
2416 ('nbrevs', 'number of revisions covered'),
2424 ('nbrevs', 'number of revisions covered'),
2417 ('nbmissingfiles', 'number of missing files at head'),
2425 ('nbmissingfiles', 'number of missing files at head'),
2418 ]
2426 ]
2419 if dotiming:
2427 if dotiming:
2420 entries.append(
2428 entries.append(
2421 ('parentnbrenames', 'rename from one parent to base')
2429 ('parentnbrenames', 'rename from one parent to base')
2422 )
2430 )
2423 entries.append(('totalnbrenames', 'total number of renames'))
2431 entries.append(('totalnbrenames', 'total number of renames'))
2424 entries.append(('parenttime', 'time for one parent'))
2432 entries.append(('parenttime', 'time for one parent'))
2425 entries.append(('totaltime', 'time for both parents'))
2433 entries.append(('totaltime', 'time for both parents'))
2426 _displaystats(ui, opts, entries, alldata)
2434 _displaystats(ui, opts, entries, alldata)
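# Example usage (the revset below is only illustrative):
#
#     hg perf::helper-mergecopies --revs '1000::2000' --stats --timing
#
# Each printed row describes one (base, p1, p2) triplet; --timing adds the
# per-parent and total copy-tracing timings at the cost of a slower run.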
2427
2435
2428
2436
2429 @command(
2437 @command(
2430 b'perf::helper-pathcopies|perfhelper-pathcopies',
2438 b'perf::helper-pathcopies|perfhelper-pathcopies',
2431 formatteropts
2439 formatteropts
2432 + [
2440 + [
2433 (b'r', b'revs', [], b'restrict search to these revisions'),
2441 (b'r', b'revs', [], b'restrict search to these revisions'),
2434 (b'', b'timing', False, b'provides extra data (costly)'),
2442 (b'', b'timing', False, b'provides extra data (costly)'),
2435 (b'', b'stats', False, b'provides statistic about the measured data'),
2443 (b'', b'stats', False, b'provides statistic about the measured data'),
2436 ],
2444 ],
2437 )
2445 )
2438 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2446 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2439 """find statistic about potential parameters for the `perftracecopies`
2447 """find statistic about potential parameters for the `perftracecopies`
2440
2448
2441 This command finds source-destination pairs relevant for copy tracing testing.
2449 This command finds source-destination pairs relevant for copy tracing testing.
2442 It reports values for some of the parameters that impact copy tracing time.
2450 It reports values for some of the parameters that impact copy tracing time.
2443
2451
2444 If `--timing` is set, rename detection is run and the associated timing
2452 If `--timing` is set, rename detection is run and the associated timing
2445 will be reported. The extra details come at the cost of a slower command
2453 will be reported. The extra details come at the cost of a slower command
2446 execution.
2454 execution.
2447
2455
2448 Since the rename detection is only run once, other factors might easily
2456 Since the rename detection is only run once, other factors might easily
2449 affect the precision of the timing. However, it should give a good
2457 affect the precision of the timing. However, it should give a good
2450 approximation of which revision pairs are very costly.
2458 approximation of which revision pairs are very costly.
2451 """
2459 """
2452 opts = _byteskwargs(opts)
2460 opts = _byteskwargs(opts)
2453 fm = ui.formatter(b'perf', opts)
2461 fm = ui.formatter(b'perf', opts)
2454 dotiming = opts[b'timing']
2462 dotiming = opts[b'timing']
2455 dostats = opts[b'stats']
2463 dostats = opts[b'stats']
2456
2464
2457 if dotiming:
2465 if dotiming:
2458 header = '%12s %12s %12s %12s %12s %12s\n'
2466 header = '%12s %12s %12s %12s %12s %12s\n'
2459 output = (
2467 output = (
2460 "%(source)12s %(destination)12s "
2468 "%(source)12s %(destination)12s "
2461 "%(nbrevs)12d %(nbmissingfiles)12d "
2469 "%(nbrevs)12d %(nbmissingfiles)12d "
2462 "%(nbrenamedfiles)12d %(time)18.5f\n"
2470 "%(nbrenamedfiles)12d %(time)18.5f\n"
2463 )
2471 )
2464 header_names = (
2472 header_names = (
2465 "source",
2473 "source",
2466 "destination",
2474 "destination",
2467 "nb-revs",
2475 "nb-revs",
2468 "nb-files",
2476 "nb-files",
2469 "nb-renames",
2477 "nb-renames",
2470 "time",
2478 "time",
2471 )
2479 )
2472 fm.plain(header % header_names)
2480 fm.plain(header % header_names)
2473 else:
2481 else:
2474 header = '%12s %12s %12s %12s\n'
2482 header = '%12s %12s %12s %12s\n'
2475 output = (
2483 output = (
2476 "%(source)12s %(destination)12s "
2484 "%(source)12s %(destination)12s "
2477 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2485 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2478 )
2486 )
2479 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2487 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2480
2488
2481 if not revs:
2489 if not revs:
2482 revs = ['all()']
2490 revs = ['all()']
2483 revs = scmutil.revrange(repo, revs)
2491 revs = scmutil.revrange(repo, revs)
2484
2492
2485 if dostats:
2493 if dostats:
2486 alldata = {
2494 alldata = {
2487 'nbrevs': [],
2495 'nbrevs': [],
2488 'nbmissingfiles': [],
2496 'nbmissingfiles': [],
2489 }
2497 }
2490 if dotiming:
2498 if dotiming:
2491 alldata['nbrenames'] = []
2499 alldata['nbrenames'] = []
2492 alldata['time'] = []
2500 alldata['time'] = []
2493
2501
2494 roi = repo.revs('merge() and %ld', revs)
2502 roi = repo.revs('merge() and %ld', revs)
2495 for r in roi:
2503 for r in roi:
2496 ctx = repo[r]
2504 ctx = repo[r]
2497 p1 = ctx.p1().rev()
2505 p1 = ctx.p1().rev()
2498 p2 = ctx.p2().rev()
2506 p2 = ctx.p2().rev()
2499 bases = repo.changelog._commonancestorsheads(p1, p2)
2507 bases = repo.changelog._commonancestorsheads(p1, p2)
2500 for p in (p1, p2):
2508 for p in (p1, p2):
2501 for b in bases:
2509 for b in bases:
2502 base = repo[b]
2510 base = repo[b]
2503 parent = repo[p]
2511 parent = repo[p]
2504 missing = copies._computeforwardmissing(base, parent)
2512 missing = copies._computeforwardmissing(base, parent)
2505 if not missing:
2513 if not missing:
2506 continue
2514 continue
2507 data = {
2515 data = {
2508 b'source': base.hex(),
2516 b'source': base.hex(),
2509 b'destination': parent.hex(),
2517 b'destination': parent.hex(),
2510 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2518 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2511 b'nbmissingfiles': len(missing),
2519 b'nbmissingfiles': len(missing),
2512 }
2520 }
2513 if dostats:
2521 if dostats:
2514 alldata['nbrevs'].append(
2522 alldata['nbrevs'].append(
2515 (
2523 (
2516 data['nbrevs'],
2524 data['nbrevs'],
2517 base.hex(),
2525 base.hex(),
2518 parent.hex(),
2526 parent.hex(),
2519 )
2527 )
2520 )
2528 )
2521 alldata['nbmissingfiles'].append(
2529 alldata['nbmissingfiles'].append(
2522 (
2530 (
2523 data['nbmissingfiles'],
2531 data['nbmissingfiles'],
2524 base.hex(),
2532 base.hex(),
2525 parent.hex(),
2533 parent.hex(),
2526 )
2534 )
2527 )
2535 )
2528 if dotiming:
2536 if dotiming:
2529 begin = util.timer()
2537 begin = util.timer()
2530 renames = copies.pathcopies(base, parent)
2538 renames = copies.pathcopies(base, parent)
2531 end = util.timer()
2539 end = util.timer()
2532 # not very stable timing since we did only one run
2540 # not very stable timing since we did only one run
2533 data['time'] = end - begin
2541 data['time'] = end - begin
2534 data['nbrenamedfiles'] = len(renames)
2542 data['nbrenamedfiles'] = len(renames)
2535 if dostats:
2543 if dostats:
2536 alldata['time'].append(
2544 alldata['time'].append(
2537 (
2545 (
2538 data['time'],
2546 data['time'],
2539 base.hex(),
2547 base.hex(),
2540 parent.hex(),
2548 parent.hex(),
2541 )
2549 )
2542 )
2550 )
2543 alldata['nbrenames'].append(
2551 alldata['nbrenames'].append(
2544 (
2552 (
2545 data['nbrenamedfiles'],
2553 data['nbrenamedfiles'],
2546 base.hex(),
2554 base.hex(),
2547 parent.hex(),
2555 parent.hex(),
2548 )
2556 )
2549 )
2557 )
2550 fm.startitem()
2558 fm.startitem()
2551 fm.data(**data)
2559 fm.data(**data)
2552 out = data.copy()
2560 out = data.copy()
2553 out['source'] = fm.hexfunc(base.node())
2561 out['source'] = fm.hexfunc(base.node())
2554 out['destination'] = fm.hexfunc(parent.node())
2562 out['destination'] = fm.hexfunc(parent.node())
2555 fm.plain(output % out)
2563 fm.plain(output % out)
2556
2564
2557 fm.end()
2565 fm.end()
2558 if dostats:
2566 if dostats:
2559 entries = [
2567 entries = [
2560 ('nbrevs', 'number of revisions covered'),
2568 ('nbrevs', 'number of revisions covered'),
2561 ('nbmissingfiles', 'number of missing files at head'),
2569 ('nbmissingfiles', 'number of missing files at head'),
2562 ]
2570 ]
2563 if dotiming:
2571 if dotiming:
2564 entries.append(('nbrenames', 'renamed files'))
2572 entries.append(('nbrenames', 'renamed files'))
2565 entries.append(('time', 'time'))
2573 entries.append(('time', 'time'))
2566 _displaystats(ui, opts, entries, alldata)
2574 _displaystats(ui, opts, entries, alldata)
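# The pairs reported above come from merge revisions in the requested set:
# for every merge, each parent is matched against the common ancestor heads
# of the two parents, and pairs with no "forward missing" files are skipped
# since copy tracing would have nothing to do for them.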
2567
2575
2568
2576
2569 @command(b'perf::cca|perfcca', formatteropts)
2577 @command(b'perf::cca|perfcca', formatteropts)
2570 def perfcca(ui, repo, **opts):
2578 def perfcca(ui, repo, **opts):
2571 opts = _byteskwargs(opts)
2579 opts = _byteskwargs(opts)
2572 timer, fm = gettimer(ui, opts)
2580 timer, fm = gettimer(ui, opts)
2573 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2581 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2574 fm.end()
2582 fm.end()
2575
2583
2576
2584
2577 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2585 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2578 def perffncacheload(ui, repo, **opts):
2586 def perffncacheload(ui, repo, **opts):
2579 opts = _byteskwargs(opts)
2587 opts = _byteskwargs(opts)
2580 timer, fm = gettimer(ui, opts)
2588 timer, fm = gettimer(ui, opts)
2581 s = repo.store
2589 s = repo.store
2582
2590
2583 def d():
2591 def d():
2584 s.fncache._load()
2592 s.fncache._load()
2585
2593
2586 timer(d)
2594 timer(d)
2587 fm.end()
2595 fm.end()
2588
2596
2589
2597
2590 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2598 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2591 def perffncachewrite(ui, repo, **opts):
2599 def perffncachewrite(ui, repo, **opts):
2592 opts = _byteskwargs(opts)
2600 opts = _byteskwargs(opts)
2593 timer, fm = gettimer(ui, opts)
2601 timer, fm = gettimer(ui, opts)
2594 s = repo.store
2602 s = repo.store
2595 lock = repo.lock()
2603 lock = repo.lock()
2596 s.fncache._load()
2604 s.fncache._load()
2597 tr = repo.transaction(b'perffncachewrite')
2605 tr = repo.transaction(b'perffncachewrite')
2598 tr.addbackup(b'fncache')
2606 tr.addbackup(b'fncache')
2599
2607
2600 def d():
2608 def d():
2601 s.fncache._dirty = True
2609 s.fncache._dirty = True
2602 s.fncache.write(tr)
2610 s.fncache.write(tr)
2603
2611
2604 timer(d)
2612 timer(d)
2605 tr.close()
2613 tr.close()
2606 lock.release()
2614 lock.release()
2607 fm.end()
2615 fm.end()
2608
2616
2609
2617
2610 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2618 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2611 def perffncacheencode(ui, repo, **opts):
2619 def perffncacheencode(ui, repo, **opts):
2612 opts = _byteskwargs(opts)
2620 opts = _byteskwargs(opts)
2613 timer, fm = gettimer(ui, opts)
2621 timer, fm = gettimer(ui, opts)
2614 s = repo.store
2622 s = repo.store
2615 s.fncache._load()
2623 s.fncache._load()
2616
2624
2617 def d():
2625 def d():
2618 for p in s.fncache.entries:
2626 for p in s.fncache.entries:
2619 s.encode(p)
2627 s.encode(p)
2620
2628
2621 timer(d)
2629 timer(d)
2622 fm.end()
2630 fm.end()
2623
2631
2624
2632
2625 def _bdiffworker(q, blocks, xdiff, ready, done):
2633 def _bdiffworker(q, blocks, xdiff, ready, done):
2626 while not done.is_set():
2634 while not done.is_set():
2627 pair = q.get()
2635 pair = q.get()
2628 while pair is not None:
2636 while pair is not None:
2629 if xdiff:
2637 if xdiff:
2630 mdiff.bdiff.xdiffblocks(*pair)
2638 mdiff.bdiff.xdiffblocks(*pair)
2631 elif blocks:
2639 elif blocks:
2632 mdiff.bdiff.blocks(*pair)
2640 mdiff.bdiff.blocks(*pair)
2633 else:
2641 else:
2634 mdiff.textdiff(*pair)
2642 mdiff.textdiff(*pair)
2635 q.task_done()
2643 q.task_done()
2636 pair = q.get()
2644 pair = q.get()
2637 q.task_done() # for the None one
2645 q.task_done() # for the None one
2638 with ready:
2646 with ready:
2639 ready.wait()
2647 ready.wait()
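# _bdiffworker drains text pairs from `q` until it sees a None sentinel,
# diffs each pair with the selected algorithm, and then parks on the `ready`
# condition so the threads can be reused across timed runs. The benchmark
# body pushes one None per thread after the real pairs, and `q.join()` is
# what the timer actually waits on.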
2640
2648
2641
2649
2642 def _manifestrevision(repo, mnode):
2650 def _manifestrevision(repo, mnode):
2643 ml = repo.manifestlog
2651 ml = repo.manifestlog
2644
2652
2645 if util.safehasattr(ml, b'getstorage'):
2653 if util.safehasattr(ml, b'getstorage'):
2646 store = ml.getstorage(b'')
2654 store = ml.getstorage(b'')
2647 else:
2655 else:
2648 store = ml._revlog
2656 store = ml._revlog
2649
2657
2650 return store.revision(mnode)
2658 return store.revision(mnode)
2651
2659
2652
2660
2653 @command(
2661 @command(
2654 b'perf::bdiff|perfbdiff',
2662 b'perf::bdiff|perfbdiff',
2655 revlogopts
2663 revlogopts
2656 + formatteropts
2664 + formatteropts
2657 + [
2665 + [
2658 (
2666 (
2659 b'',
2667 b'',
2660 b'count',
2668 b'count',
2661 1,
2669 1,
2662 b'number of revisions to test (when using --startrev)',
2670 b'number of revisions to test (when using --startrev)',
2663 ),
2671 ),
2664 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2672 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2665 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2673 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2666 (b'', b'blocks', False, b'test computing diffs into blocks'),
2674 (b'', b'blocks', False, b'test computing diffs into blocks'),
2667 (b'', b'xdiff', False, b'use xdiff algorithm'),
2675 (b'', b'xdiff', False, b'use xdiff algorithm'),
2668 ],
2676 ],
2669 b'-c|-m|FILE REV',
2677 b'-c|-m|FILE REV',
2670 )
2678 )
2671 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2679 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2672 """benchmark a bdiff between revisions
2680 """benchmark a bdiff between revisions
2673
2681
2674 By default, benchmark a bdiff between the requested revision and its delta parent.
2682 By default, benchmark a bdiff between the requested revision and its delta parent.
2675
2683
2676 With ``--count``, benchmark bdiffs between delta parents and self for N
2684 With ``--count``, benchmark bdiffs between delta parents and self for N
2677 revisions starting at the specified revision.
2685 revisions starting at the specified revision.
2678
2686
2679 With ``--alldata``, assume the requested revision is a changeset and
2687 With ``--alldata``, assume the requested revision is a changeset and
2680 measure bdiffs for all changes related to that changeset (manifest
2688 measure bdiffs for all changes related to that changeset (manifest
2681 and filelogs).
2689 and filelogs).
2682 """
2690 """
2683 opts = _byteskwargs(opts)
2691 opts = _byteskwargs(opts)
2684
2692
2685 if opts[b'xdiff'] and not opts[b'blocks']:
2693 if opts[b'xdiff'] and not opts[b'blocks']:
2686 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2694 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2687
2695
2688 if opts[b'alldata']:
2696 if opts[b'alldata']:
2689 opts[b'changelog'] = True
2697 opts[b'changelog'] = True
2690
2698
2691 if opts.get(b'changelog') or opts.get(b'manifest'):
2699 if opts.get(b'changelog') or opts.get(b'manifest'):
2692 file_, rev = None, file_
2700 file_, rev = None, file_
2693 elif rev is None:
2701 elif rev is None:
2694 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2702 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2695
2703
2696 blocks = opts[b'blocks']
2704 blocks = opts[b'blocks']
2697 xdiff = opts[b'xdiff']
2705 xdiff = opts[b'xdiff']
2698 textpairs = []
2706 textpairs = []
2699
2707
2700 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2708 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2701
2709
2702 startrev = r.rev(r.lookup(rev))
2710 startrev = r.rev(r.lookup(rev))
2703 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2711 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2704 if opts[b'alldata']:
2712 if opts[b'alldata']:
2705 # Load revisions associated with changeset.
2713 # Load revisions associated with changeset.
2706 ctx = repo[rev]
2714 ctx = repo[rev]
2707 mtext = _manifestrevision(repo, ctx.manifestnode())
2715 mtext = _manifestrevision(repo, ctx.manifestnode())
2708 for pctx in ctx.parents():
2716 for pctx in ctx.parents():
2709 pman = _manifestrevision(repo, pctx.manifestnode())
2717 pman = _manifestrevision(repo, pctx.manifestnode())
2710 textpairs.append((pman, mtext))
2718 textpairs.append((pman, mtext))
2711
2719
2712 # Load filelog revisions by iterating manifest delta.
2720 # Load filelog revisions by iterating manifest delta.
2713 man = ctx.manifest()
2721 man = ctx.manifest()
2714 pman = ctx.p1().manifest()
2722 pman = ctx.p1().manifest()
2715 for filename, change in pman.diff(man).items():
2723 for filename, change in pman.diff(man).items():
2716 fctx = repo.file(filename)
2724 fctx = repo.file(filename)
2717 f1 = fctx.revision(change[0][0] or -1)
2725 f1 = fctx.revision(change[0][0] or -1)
2718 f2 = fctx.revision(change[1][0] or -1)
2726 f2 = fctx.revision(change[1][0] or -1)
2719 textpairs.append((f1, f2))
2727 textpairs.append((f1, f2))
2720 else:
2728 else:
2721 dp = r.deltaparent(rev)
2729 dp = r.deltaparent(rev)
2722 textpairs.append((r.revision(dp), r.revision(rev)))
2730 textpairs.append((r.revision(dp), r.revision(rev)))
2723
2731
2724 withthreads = threads > 0
2732 withthreads = threads > 0
2725 if not withthreads:
2733 if not withthreads:
2726
2734
2727 def d():
2735 def d():
2728 for pair in textpairs:
2736 for pair in textpairs:
2729 if xdiff:
2737 if xdiff:
2730 mdiff.bdiff.xdiffblocks(*pair)
2738 mdiff.bdiff.xdiffblocks(*pair)
2731 elif blocks:
2739 elif blocks:
2732 mdiff.bdiff.blocks(*pair)
2740 mdiff.bdiff.blocks(*pair)
2733 else:
2741 else:
2734 mdiff.textdiff(*pair)
2742 mdiff.textdiff(*pair)
2735
2743
2736 else:
2744 else:
2737 q = queue()
2745 q = queue()
2738 for i in _xrange(threads):
2746 for i in _xrange(threads):
2739 q.put(None)
2747 q.put(None)
2740 ready = threading.Condition()
2748 ready = threading.Condition()
2741 done = threading.Event()
2749 done = threading.Event()
2742 for i in _xrange(threads):
2750 for i in _xrange(threads):
2743 threading.Thread(
2751 threading.Thread(
2744 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2752 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2745 ).start()
2753 ).start()
2746 q.join()
2754 q.join()
2747
2755
2748 def d():
2756 def d():
2749 for pair in textpairs:
2757 for pair in textpairs:
2750 q.put(pair)
2758 q.put(pair)
2751 for i in _xrange(threads):
2759 for i in _xrange(threads):
2752 q.put(None)
2760 q.put(None)
2753 with ready:
2761 with ready:
2754 ready.notify_all()
2762 ready.notify_all()
2755 q.join()
2763 q.join()
2756
2764
2757 timer, fm = gettimer(ui, opts)
2765 timer, fm = gettimer(ui, opts)
2758 timer(d)
2766 timer(d)
2759 fm.end()
2767 fm.end()
2760
2768
2761 if withthreads:
2769 if withthreads:
2762 done.set()
2770 done.set()
2763 for i in _xrange(threads):
2771 for i in _xrange(threads):
2764 q.put(None)
2772 q.put(None)
2765 with ready:
2773 with ready:
2766 ready.notify_all()
2774 ready.notify_all()
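# Example invocations (file name and revisions below are only illustrative):
#
#     hg perf::bdiff -m 1000 --count 50          # manifest deltas from rev 1000
#     hg perf::bdiff README 10 --blocks --xdiff  # block-level xdiff of one file
#     hg perf::bdiff -c 1000 --alldata           # changeset, manifest and filelogs
#
# --threads spawns that many _bdiffworker threads and measures the queued,
# parallel variant instead of the serial loop.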
2767
2775
2768
2776
2769 @command(
2777 @command(
2770 b'perf::unbundle',
2778 b'perf::unbundle',
2771 formatteropts,
2779 formatteropts,
2772 b'BUNDLE_FILE',
2780 b'BUNDLE_FILE',
2773 )
2781 )
2774 def perf_unbundle(ui, repo, fname, **opts):
2782 def perf_unbundle(ui, repo, fname, **opts):
2775 """benchmark application of a bundle in a repository.
2783 """benchmark application of a bundle in a repository.
2776
2784
2777 This does not include the final transaction processing"""
2785 This does not include the final transaction processing"""
2778
2786
2779 from mercurial import exchange
2787 from mercurial import exchange
2780 from mercurial import bundle2
2788 from mercurial import bundle2
2781 from mercurial import transaction
2789 from mercurial import transaction
2782
2790
2783 opts = _byteskwargs(opts)
2791 opts = _byteskwargs(opts)
2784
2792
2785 ### some compatibility hotfix
2793 ### some compatibility hotfix
2786 #
2794 #
2787 # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
2795 # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
2788 # critical regression that breaks transaction rollback for files that are
2796 # critical regression that breaks transaction rollback for files that are
2789 # de-inlined.
2797 # de-inlined.
2790 method = transaction.transaction._addentry
2798 method = transaction.transaction._addentry
2791 pre_63edc384d3b7 = "data" in getargspec(method).args
2799 pre_63edc384d3b7 = "data" in getargspec(method).args
2792 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2800 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2793 # a changeset that is a close descendant of 18415fc918a1, the changeset
2801 # a changeset that is a close descendant of 18415fc918a1, the changeset
2794 # that concludes the fix run for the bug introduced in 63edc384d3b7.
2802 # that concludes the fix run for the bug introduced in 63edc384d3b7.
2795 args = getargspec(error.Abort.__init__).args
2803 args = getargspec(error.Abort.__init__).args
2796 post_18415fc918a1 = "detailed_exit_code" in args
2804 post_18415fc918a1 = "detailed_exit_code" in args
2797
2805
2798 old_max_inline = None
2806 old_max_inline = None
2799 try:
2807 try:
2800 if not (pre_63edc384d3b7 or post_18415fc918a1):
2808 if not (pre_63edc384d3b7 or post_18415fc918a1):
2801 # disable inlining
2809 # disable inlining
2802 old_max_inline = mercurial.revlog._maxinline
2810 old_max_inline = mercurial.revlog._maxinline
2803 # large enough to never happen
2811 # large enough to never happen
2804 mercurial.revlog._maxinline = 2 ** 50
2812 mercurial.revlog._maxinline = 2 ** 50
2805
2813
2806 with repo.lock():
2814 with repo.lock():
2807 bundle = [None, None]
2815 bundle = [None, None]
2808 orig_quiet = repo.ui.quiet
2816 orig_quiet = repo.ui.quiet
2809 try:
2817 try:
2810 repo.ui.quiet = True
2818 repo.ui.quiet = True
2811 with open(fname, mode="rb") as f:
2819 with open(fname, mode="rb") as f:
2812
2820
2813 def noop_report(*args, **kwargs):
2821 def noop_report(*args, **kwargs):
2814 pass
2822 pass
2815
2823
2816 def setup():
2824 def setup():
2817 gen, tr = bundle
2825 gen, tr = bundle
2818 if tr is not None:
2826 if tr is not None:
2819 tr.abort()
2827 tr.abort()
2820 bundle[:] = [None, None]
2828 bundle[:] = [None, None]
2821 f.seek(0)
2829 f.seek(0)
2822 bundle[0] = exchange.readbundle(ui, f, fname)
2830 bundle[0] = exchange.readbundle(ui, f, fname)
2823 bundle[1] = repo.transaction(b'perf::unbundle')
2831 bundle[1] = repo.transaction(b'perf::unbundle')
2824 # silence the transaction
2832 # silence the transaction
2825 bundle[1]._report = noop_report
2833 bundle[1]._report = noop_report
2826
2834
2827 def apply():
2835 def apply():
2828 gen, tr = bundle
2836 gen, tr = bundle
2829 bundle2.applybundle(
2837 bundle2.applybundle(
2830 repo,
2838 repo,
2831 gen,
2839 gen,
2832 tr,
2840 tr,
2833 source=b'perf::unbundle',
2841 source=b'perf::unbundle',
2834 url=fname,
2842 url=fname,
2835 )
2843 )
2836
2844
2837 timer, fm = gettimer(ui, opts)
2845 timer, fm = gettimer(ui, opts)
2838 timer(apply, setup=setup)
2846 timer(apply, setup=setup)
2839 fm.end()
2847 fm.end()
2840 finally:
2848 finally:
2841 repo.ui.quiet = orig_quiet
2849 repo.ui.quiet = orig_quiet
2842 gen, tr = bundle
2850 gen, tr = bundle
2843 if tr is not None:
2851 if tr is not None:
2844 tr.abort()
2852 tr.abort()
2845 finally:
2853 finally:
2846 if old_max_inline is not None:
2854 if old_max_inline is not None:
2847 mercurial.revlog._maxinline = old_max_inline
2855 mercurial.revlog._maxinline = old_max_inline
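# perf::unbundle times only bundle2.applybundle(): setup() re-reads the
# bundle file and opens a fresh transaction for every run, and the
# transaction is aborted afterwards so the repository is left unchanged.
# A bundle to feed it can be produced with, e.g.,
# `hg bundle --all bundle.hg` (the file name is only illustrative).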
2848
2856
2849
2857
2850 @command(
2858 @command(
2851 b'perf::unidiff|perfunidiff',
2859 b'perf::unidiff|perfunidiff',
2852 revlogopts
2860 revlogopts
2853 + formatteropts
2861 + formatteropts
2854 + [
2862 + [
2855 (
2863 (
2856 b'',
2864 b'',
2857 b'count',
2865 b'count',
2858 1,
2866 1,
2859 b'number of revisions to test (when using --startrev)',
2867 b'number of revisions to test (when using --startrev)',
2860 ),
2868 ),
2861 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2869 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2862 ],
2870 ],
2863 b'-c|-m|FILE REV',
2871 b'-c|-m|FILE REV',
2864 )
2872 )
2865 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2873 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2866 """benchmark a unified diff between revisions
2874 """benchmark a unified diff between revisions
2867
2875
2868 This doesn't include any copy tracing - it's just a unified diff
2876 This doesn't include any copy tracing - it's just a unified diff
2869 of the texts.
2877 of the texts.
2870
2878
2871 By default, benchmark a diff between the requested revision and its delta parent.
2879 By default, benchmark a diff between the requested revision and its delta parent.
2872
2880
2873 With ``--count``, benchmark diffs between delta parents and self for N
2881 With ``--count``, benchmark diffs between delta parents and self for N
2874 revisions starting at the specified revision.
2882 revisions starting at the specified revision.
2875
2883
2876 With ``--alldata``, assume the requested revision is a changeset and
2884 With ``--alldata``, assume the requested revision is a changeset and
2877 measure diffs for all changes related to that changeset (manifest
2885 measure diffs for all changes related to that changeset (manifest
2878 and filelogs).
2886 and filelogs).
2879 """
2887 """
2880 opts = _byteskwargs(opts)
2888 opts = _byteskwargs(opts)
2881 if opts[b'alldata']:
2889 if opts[b'alldata']:
2882 opts[b'changelog'] = True
2890 opts[b'changelog'] = True
2883
2891
2884 if opts.get(b'changelog') or opts.get(b'manifest'):
2892 if opts.get(b'changelog') or opts.get(b'manifest'):
2885 file_, rev = None, file_
2893 file_, rev = None, file_
2886 elif rev is None:
2894 elif rev is None:
2887 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2895 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2888
2896
2889 textpairs = []
2897 textpairs = []
2890
2898
2891 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2899 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2892
2900
2893 startrev = r.rev(r.lookup(rev))
2901 startrev = r.rev(r.lookup(rev))
2894 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2902 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2895 if opts[b'alldata']:
2903 if opts[b'alldata']:
2896 # Load revisions associated with changeset.
2904 # Load revisions associated with changeset.
2897 ctx = repo[rev]
2905 ctx = repo[rev]
2898 mtext = _manifestrevision(repo, ctx.manifestnode())
2906 mtext = _manifestrevision(repo, ctx.manifestnode())
2899 for pctx in ctx.parents():
2907 for pctx in ctx.parents():
2900 pman = _manifestrevision(repo, pctx.manifestnode())
2908 pman = _manifestrevision(repo, pctx.manifestnode())
2901 textpairs.append((pman, mtext))
2909 textpairs.append((pman, mtext))
2902
2910
2903 # Load filelog revisions by iterating manifest delta.
2911 # Load filelog revisions by iterating manifest delta.
2904 man = ctx.manifest()
2912 man = ctx.manifest()
2905 pman = ctx.p1().manifest()
2913 pman = ctx.p1().manifest()
2906 for filename, change in pman.diff(man).items():
2914 for filename, change in pman.diff(man).items():
2907 fctx = repo.file(filename)
2915 fctx = repo.file(filename)
2908 f1 = fctx.revision(change[0][0] or -1)
2916 f1 = fctx.revision(change[0][0] or -1)
2909 f2 = fctx.revision(change[1][0] or -1)
2917 f2 = fctx.revision(change[1][0] or -1)
2910 textpairs.append((f1, f2))
2918 textpairs.append((f1, f2))
2911 else:
2919 else:
2912 dp = r.deltaparent(rev)
2920 dp = r.deltaparent(rev)
2913 textpairs.append((r.revision(dp), r.revision(rev)))
2921 textpairs.append((r.revision(dp), r.revision(rev)))
2914
2922
2915 def d():
2923 def d():
2916 for left, right in textpairs:
2924 for left, right in textpairs:
2917 # The date strings don't matter, so we pass empty strings.
2925 # The date strings don't matter, so we pass empty strings.
2918 headerlines, hunks = mdiff.unidiff(
2926 headerlines, hunks = mdiff.unidiff(
2919 left, b'', right, b'', b'left', b'right', binary=False
2927 left, b'', right, b'', b'left', b'right', binary=False
2920 )
2928 )
2921 # consume iterators in roughly the way patch.py does
2929 # consume iterators in roughly the way patch.py does
2922 b'\n'.join(headerlines)
2930 b'\n'.join(headerlines)
2923 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2931 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2924
2932
2925 timer, fm = gettimer(ui, opts)
2933 timer, fm = gettimer(ui, opts)
2926 timer(d)
2934 timer(d)
2927 fm.end()
2935 fm.end()
2928
2936
2929
2937
2930 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2938 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2931 def perfdiffwd(ui, repo, **opts):
2939 def perfdiffwd(ui, repo, **opts):
2932 """Profile diff of working directory changes"""
2940 """Profile diff of working directory changes"""
2933 opts = _byteskwargs(opts)
2941 opts = _byteskwargs(opts)
2934 timer, fm = gettimer(ui, opts)
2942 timer, fm = gettimer(ui, opts)
2935 options = {
2943 options = {
2936 'w': 'ignore_all_space',
2944 'w': 'ignore_all_space',
2937 'b': 'ignore_space_change',
2945 'b': 'ignore_space_change',
2938 'B': 'ignore_blank_lines',
2946 'B': 'ignore_blank_lines',
2939 }
2947 }
2940
2948
2941 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2949 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2942 opts = {options[c]: b'1' for c in diffopt}
2950 opts = {options[c]: b'1' for c in diffopt}
2943
2951
2944 def d():
2952 def d():
2945 ui.pushbuffer()
2953 ui.pushbuffer()
2946 commands.diff(ui, repo, **opts)
2954 commands.diff(ui, repo, **opts)
2947 ui.popbuffer()
2955 ui.popbuffer()
2948
2956
2949 diffopt = diffopt.encode('ascii')
2957 diffopt = diffopt.encode('ascii')
2950 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2958 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2951 timer(d, title=title)
2959 timer(d, title=title)
2952 fm.end()
2960 fm.end()
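# perf::diffwd times `hg diff` against the working directory once per
# whitespace option combination: none, -w, -b, -B and -wB, so the reported
# titles line up with the diffopts string used for that run.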
2953
2961
2954
2962
2955 @command(
2963 @command(
2956 b'perf::revlogindex|perfrevlogindex',
2964 b'perf::revlogindex|perfrevlogindex',
2957 revlogopts + formatteropts,
2965 revlogopts + formatteropts,
2958 b'-c|-m|FILE',
2966 b'-c|-m|FILE',
2959 )
2967 )
2960 def perfrevlogindex(ui, repo, file_=None, **opts):
2968 def perfrevlogindex(ui, repo, file_=None, **opts):
2961 """Benchmark operations against a revlog index.
2969 """Benchmark operations against a revlog index.
2962
2970
2963 This tests constructing a revlog instance, reading index data,
2971 This tests constructing a revlog instance, reading index data,
2964 parsing index data, and performing various operations related to
2972 parsing index data, and performing various operations related to
2965 index data.
2973 index data.
2966 """
2974 """
2967
2975
2968 opts = _byteskwargs(opts)
2976 opts = _byteskwargs(opts)
2969
2977
2970 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2978 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2971
2979
2972 opener = getattr(rl, 'opener') # trick linter
2980 opener = getattr(rl, 'opener') # trick linter
2973 # compat with hg <= 5.8
2981 # compat with hg <= 5.8
2974 radix = getattr(rl, 'radix', None)
2982 radix = getattr(rl, 'radix', None)
2975 indexfile = getattr(rl, '_indexfile', None)
2983 indexfile = getattr(rl, '_indexfile', None)
2976 if indexfile is None:
2984 if indexfile is None:
2977 # compatibility with <= hg-5.8
2985 # compatibility with <= hg-5.8
2978 indexfile = getattr(rl, 'indexfile')
2986 indexfile = getattr(rl, 'indexfile')
2979 data = opener.read(indexfile)
2987 data = opener.read(indexfile)
2980
2988
2981 header = struct.unpack(b'>I', data[0:4])[0]
2989 header = struct.unpack(b'>I', data[0:4])[0]
2982 version = header & 0xFFFF
2990 version = header & 0xFFFF
2983 if version == 1:
2991 if version == 1:
2984 inline = header & (1 << 16)
2992 inline = header & (1 << 16)
2985 else:
2993 else:
2986 raise error.Abort(b'unsupported revlog version: %d' % version)
2994 raise error.Abort(b'unsupported revlog version: %d' % version)
2987
2995
2988 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2996 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2989 if parse_index_v1 is None:
2997 if parse_index_v1 is None:
2990 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2998 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2991
2999
2992 rllen = len(rl)
3000 rllen = len(rl)
2993
3001
2994 node0 = rl.node(0)
3002 node0 = rl.node(0)
2995 node25 = rl.node(rllen // 4)
3003 node25 = rl.node(rllen // 4)
2996 node50 = rl.node(rllen // 2)
3004 node50 = rl.node(rllen // 2)
2997 node75 = rl.node(rllen // 4 * 3)
3005 node75 = rl.node(rllen // 4 * 3)
2998 node100 = rl.node(rllen - 1)
3006 node100 = rl.node(rllen - 1)
2999
3007
3000 allrevs = range(rllen)
3008 allrevs = range(rllen)
3001 allrevsrev = list(reversed(allrevs))
3009 allrevsrev = list(reversed(allrevs))
3002 allnodes = [rl.node(rev) for rev in range(rllen)]
3010 allnodes = [rl.node(rev) for rev in range(rllen)]
3003 allnodesrev = list(reversed(allnodes))
3011 allnodesrev = list(reversed(allnodes))
3004
3012
3005 def constructor():
3013 def constructor():
3006 if radix is not None:
3014 if radix is not None:
3007 revlog(opener, radix=radix)
3015 revlog(opener, radix=radix)
3008 else:
3016 else:
3009 # hg <= 5.8
3017 # hg <= 5.8
3010 revlog(opener, indexfile=indexfile)
3018 revlog(opener, indexfile=indexfile)
3011
3019
3012 def read():
3020 def read():
3013 with opener(indexfile) as fh:
3021 with opener(indexfile) as fh:
3014 fh.read()
3022 fh.read()
3015
3023
3016 def parseindex():
3024 def parseindex():
3017 parse_index_v1(data, inline)
3025 parse_index_v1(data, inline)
3018
3026
3019 def getentry(revornode):
3027 def getentry(revornode):
3020 index = parse_index_v1(data, inline)[0]
3028 index = parse_index_v1(data, inline)[0]
3021 index[revornode]
3029 index[revornode]
3022
3030
3023 def getentries(revs, count=1):
3031 def getentries(revs, count=1):
3024 index = parse_index_v1(data, inline)[0]
3032 index = parse_index_v1(data, inline)[0]
3025
3033
3026 for i in range(count):
3034 for i in range(count):
3027 for rev in revs:
3035 for rev in revs:
3028 index[rev]
3036 index[rev]
3029
3037
3030 def resolvenode(node):
3038 def resolvenode(node):
3031 index = parse_index_v1(data, inline)[0]
3039 index = parse_index_v1(data, inline)[0]
3032 rev = getattr(index, 'rev', None)
3040 rev = getattr(index, 'rev', None)
3033 if rev is None:
3041 if rev is None:
3034 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3042 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3035 # This only works for the C code.
3043 # This only works for the C code.
3036 if nodemap is None:
3044 if nodemap is None:
3037 return
3045 return
3038 rev = nodemap.__getitem__
3046 rev = nodemap.__getitem__
3039
3047
3040 try:
3048 try:
3041 rev(node)
3049 rev(node)
3042 except error.RevlogError:
3050 except error.RevlogError:
3043 pass
3051 pass
3044
3052
3045 def resolvenodes(nodes, count=1):
3053 def resolvenodes(nodes, count=1):
3046 index = parse_index_v1(data, inline)[0]
3054 index = parse_index_v1(data, inline)[0]
3047 rev = getattr(index, 'rev', None)
3055 rev = getattr(index, 'rev', None)
3048 if rev is None:
3056 if rev is None:
3049 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3057 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3050 # This only works for the C code.
3058 # This only works for the C code.
3051 if nodemap is None:
3059 if nodemap is None:
3052 return
3060 return
3053 rev = nodemap.__getitem__
3061 rev = nodemap.__getitem__
3054
3062
3055 for i in range(count):
3063 for i in range(count):
3056 for node in nodes:
3064 for node in nodes:
3057 try:
3065 try:
3058 rev(node)
3066 rev(node)
3059 except error.RevlogError:
3067 except error.RevlogError:
3060 pass
3068 pass
3061
3069
3062 benches = [
3070 benches = [
3063 (constructor, b'revlog constructor'),
3071 (constructor, b'revlog constructor'),
3064 (read, b'read'),
3072 (read, b'read'),
3065 (parseindex, b'create index object'),
3073 (parseindex, b'create index object'),
3066 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3074 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3067 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3075 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3068 (lambda: resolvenode(node0), b'look up node at rev 0'),
3076 (lambda: resolvenode(node0), b'look up node at rev 0'),
3069 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3077 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3070 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3078 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3071 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3079 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3072 (lambda: resolvenode(node100), b'look up node at tip'),
3080 (lambda: resolvenode(node100), b'look up node at tip'),
3073 # 2x variation is to measure caching impact.
3081 # 2x variation is to measure caching impact.
3074 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3082 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3075 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3083 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3076 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3084 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3077 (
3085 (
3078 lambda: resolvenodes(allnodesrev, 2),
3086 lambda: resolvenodes(allnodesrev, 2),
3079 b'look up all nodes 2x (reverse)',
3087 b'look up all nodes 2x (reverse)',
3080 ),
3088 ),
3081 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3089 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3082 (
3090 (
3083 lambda: getentries(allrevs, 2),
3091 lambda: getentries(allrevs, 2),
3084 b'retrieve all index entries 2x (forward)',
3092 b'retrieve all index entries 2x (forward)',
3085 ),
3093 ),
3086 (
3094 (
3087 lambda: getentries(allrevsrev),
3095 lambda: getentries(allrevsrev),
3088 b'retrieve all index entries (reverse)',
3096 b'retrieve all index entries (reverse)',
3089 ),
3097 ),
3090 (
3098 (
3091 lambda: getentries(allrevsrev, 2),
3099 lambda: getentries(allrevsrev, 2),
3092 b'retrieve all index entries 2x (reverse)',
3100 b'retrieve all index entries 2x (reverse)',
3093 ),
3101 ),
3094 ]
3102 ]
3095
3103
3096 for fn, title in benches:
3104 for fn, title in benches:
3097 timer, fm = gettimer(ui, opts)
3105 timer, fm = gettimer(ui, opts)
3098 timer(fn, title=title)
3106 timer(fn, title=title)
3099 fm.end()
3107 fm.end()
3100
3108
3101
3109
3102 @command(
3110 @command(
3103 b'perf::revlogrevisions|perfrevlogrevisions',
3111 b'perf::revlogrevisions|perfrevlogrevisions',
3104 revlogopts
3112 revlogopts
3105 + formatteropts
3113 + formatteropts
3106 + [
3114 + [
3107 (b'd', b'dist', 100, b'distance between the revisions'),
3115 (b'd', b'dist', 100, b'distance between the revisions'),
3108 (b's', b'startrev', 0, b'revision to start reading at'),
3116 (b's', b'startrev', 0, b'revision to start reading at'),
3109 (b'', b'reverse', False, b'read in reverse'),
3117 (b'', b'reverse', False, b'read in reverse'),
3110 ],
3118 ],
3111 b'-c|-m|FILE',
3119 b'-c|-m|FILE',
3112 )
3120 )
3113 def perfrevlogrevisions(
3121 def perfrevlogrevisions(
3114 ui, repo, file_=None, startrev=0, reverse=False, **opts
3122 ui, repo, file_=None, startrev=0, reverse=False, **opts
3115 ):
3123 ):
3116 """Benchmark reading a series of revisions from a revlog.
3124 """Benchmark reading a series of revisions from a revlog.
3117
3125
3118 By default, we read every ``-d/--dist`` revision from 0 to tip of
3126 By default, we read every ``-d/--dist`` revision from 0 to tip of
3119 the specified revlog.
3127 the specified revlog.
3120
3128
3121 The start revision can be defined via ``-s/--startrev``.
3129 The start revision can be defined via ``-s/--startrev``.
3122 """
3130 """
3123 opts = _byteskwargs(opts)
3131 opts = _byteskwargs(opts)
3124
3132
3125 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3133 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3126 rllen = getlen(ui)(rl)
3134 rllen = getlen(ui)(rl)
3127
3135
3128 if startrev < 0:
3136 if startrev < 0:
3129 startrev = rllen + startrev
3137 startrev = rllen + startrev
3130
3138
3131 def d():
3139 def d():
3132 rl.clearcaches()
3140 rl.clearcaches()
3133
3141
3134 beginrev = startrev
3142 beginrev = startrev
3135 endrev = rllen
3143 endrev = rllen
3136 dist = opts[b'dist']
3144 dist = opts[b'dist']
3137
3145
3138 if reverse:
3146 if reverse:
3139 beginrev, endrev = endrev - 1, beginrev - 1
3147 beginrev, endrev = endrev - 1, beginrev - 1
3140 dist = -1 * dist
3148 dist = -1 * dist
3141
3149
3142 for x in _xrange(beginrev, endrev, dist):
3150 for x in _xrange(beginrev, endrev, dist):
3143 # Old revisions don't support passing int.
3151 # Old revisions don't support passing int.
3144 n = rl.node(x)
3152 n = rl.node(x)
3145 rl.revision(n)
3153 rl.revision(n)
3146
3154
3147 timer, fm = gettimer(ui, opts)
3155 timer, fm = gettimer(ui, opts)
3148 timer(d)
3156 timer(d)
3149 fm.end()
3157 fm.end()
3150
3158
3151
3159
3152 @command(
3160 @command(
3153 b'perf::revlogwrite|perfrevlogwrite',
3161 b'perf::revlogwrite|perfrevlogwrite',
3154 revlogopts
3162 revlogopts
3155 + formatteropts
3163 + formatteropts
3156 + [
3164 + [
3157 (b's', b'startrev', 1000, b'revision to start writing at'),
3165 (b's', b'startrev', 1000, b'revision to start writing at'),
3158 (b'', b'stoprev', -1, b'last revision to write'),
3166 (b'', b'stoprev', -1, b'last revision to write'),
3159 (b'', b'count', 3, b'number of passes to perform'),
3167 (b'', b'count', 3, b'number of passes to perform'),
3160 (b'', b'details', False, b'print timing for every revision tested'),
3168 (b'', b'details', False, b'print timing for every revision tested'),
3161 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
3169 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
3162 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3170 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3163 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3171 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3164 ],
3172 ],
3165 b'-c|-m|FILE',
3173 b'-c|-m|FILE',
3166 )
3174 )
3167 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3175 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3168 """Benchmark writing a series of revisions to a revlog.
3176 """Benchmark writing a series of revisions to a revlog.
3169
3177
3170 Possible source values are:
3178 Possible source values are:
3171 * `full`: add from a full text (default).
3179 * `full`: add from a full text (default).
3172 * `parent-1`: add from a delta to the first parent
3180 * `parent-1`: add from a delta to the first parent
3173 * `parent-2`: add from a delta to the second parent if it exists
3181 * `parent-2`: add from a delta to the second parent if it exists
3174 (use a delta from the first parent otherwise)
3182 (use a delta from the first parent otherwise)
3175 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3183 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3176 * `storage`: add from the existing precomputed deltas
3184 * `storage`: add from the existing precomputed deltas
3177
3185
3178 Note: This performance command measures performance in a custom way. As a
3186 Note: This performance command measures performance in a custom way. As a
3179 result some of the global configuration of the 'perf' command does not
3187 result some of the global configuration of the 'perf' command does not
3180 apply to it:
3188 apply to it:
3181
3189
3182 * ``pre-run``: disabled
3190 * ``pre-run``: disabled
3183
3191
3184 * ``profile-benchmark``: disabled
3192 * ``profile-benchmark``: disabled
3185
3193
3186 * ``run-limits``: disabled, use --count instead
3194 * ``run-limits``: disabled, use --count instead
3187 """
3195 """
3188 opts = _byteskwargs(opts)
3196 opts = _byteskwargs(opts)
3189
3197
3190 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3198 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3191 rllen = getlen(ui)(rl)
3199 rllen = getlen(ui)(rl)
3192 if startrev < 0:
3200 if startrev < 0:
3193 startrev = rllen + startrev
3201 startrev = rllen + startrev
3194 if stoprev < 0:
3202 if stoprev < 0:
3195 stoprev = rllen + stoprev
3203 stoprev = rllen + stoprev
3196
3204
3197 lazydeltabase = opts['lazydeltabase']
3205 lazydeltabase = opts['lazydeltabase']
3198 source = opts['source']
3206 source = opts['source']
3199 clearcaches = opts['clear_caches']
3207 clearcaches = opts['clear_caches']
3200 validsource = (
3208 validsource = (
3201 b'full',
3209 b'full',
3202 b'parent-1',
3210 b'parent-1',
3203 b'parent-2',
3211 b'parent-2',
3204 b'parent-smallest',
3212 b'parent-smallest',
3205 b'storage',
3213 b'storage',
3206 )
3214 )
3207 if source not in validsource:
3215 if source not in validsource:
3208 raise error.Abort('invalid source type: %s' % source)
3216 raise error.Abort('invalid source type: %s' % source)
3209
3217
3210 ### actually gather results
3218 ### actually gather results
3211 count = opts['count']
3219 count = opts['count']
3212 if count <= 0:
3220 if count <= 0:
3213 raise error.Abort('invalid run count: %d' % count)
3221 raise error.Abort('invalid run count: %d' % count)
3214 allresults = []
3222 allresults = []
3215 for c in range(count):
3223 for c in range(count):
3216 timing = _timeonewrite(
3224 timing = _timeonewrite(
3217 ui,
3225 ui,
3218 rl,
3226 rl,
3219 source,
3227 source,
3220 startrev,
3228 startrev,
3221 stoprev,
3229 stoprev,
3222 c + 1,
3230 c + 1,
3223 lazydeltabase=lazydeltabase,
3231 lazydeltabase=lazydeltabase,
3224 clearcaches=clearcaches,
3232 clearcaches=clearcaches,
3225 )
3233 )
3226 allresults.append(timing)
3234 allresults.append(timing)
3227
3235
3228 ### consolidate the results in a single list
3236 ### consolidate the results in a single list
3229 results = []
3237 results = []
3230 for idx, (rev, t) in enumerate(allresults[0]):
3238 for idx, (rev, t) in enumerate(allresults[0]):
3231 ts = [t]
3239 ts = [t]
3232 for other in allresults[1:]:
3240 for other in allresults[1:]:
3233 orev, ot = other[idx]
3241 orev, ot = other[idx]
3234 assert orev == rev
3242 assert orev == rev
3235 ts.append(ot)
3243 ts.append(ot)
3236 results.append((rev, ts))
3244 results.append((rev, ts))
3237 resultcount = len(results)
3245 resultcount = len(results)
3238
3246
3239 ### Compute and display relevant statistics
3247 ### Compute and display relevant statistics
3240
3248
3241 # get a formatter
3249 # get a formatter
3242 fm = ui.formatter(b'perf', opts)
3250 fm = ui.formatter(b'perf', opts)
3243 displayall = ui.configbool(b"perf", b"all-timing", False)
3251 displayall = ui.configbool(b"perf", b"all-timing", False)
3244
3252
3245 # print individual details if requested
3253 # print individual details if requested
3246 if opts['details']:
3254 if opts['details']:
3247 for idx, item in enumerate(results, 1):
3255 for idx, item in enumerate(results, 1):
3248 rev, data = item
3256 rev, data = item
3249 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3257 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3250 formatone(fm, data, title=title, displayall=displayall)
3258 formatone(fm, data, title=title, displayall=displayall)
3251
3259
3252 # sorts results by median time
3260 # sorts results by median time
3253 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3261 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3254 # list of (name, index) to display
3262 # list of (name, index) to display
3255 relevants = [
3263 relevants = [
3256 ("min", 0),
3264 ("min", 0),
3257 ("10%", resultcount * 10 // 100),
3265 ("10%", resultcount * 10 // 100),
3258 ("25%", resultcount * 25 // 100),
3266 ("25%", resultcount * 25 // 100),
3259 ("50%", resultcount * 70 // 100),
3267 ("50%", resultcount * 70 // 100),
3260 ("75%", resultcount * 75 // 100),
3268 ("75%", resultcount * 75 // 100),
3261 ("90%", resultcount * 90 // 100),
3269 ("90%", resultcount * 90 // 100),
3262 ("95%", resultcount * 95 // 100),
3270 ("95%", resultcount * 95 // 100),
3263 ("99%", resultcount * 99 // 100),
3271 ("99%", resultcount * 99 // 100),
3264 ("99.9%", resultcount * 999 // 1000),
3272 ("99.9%", resultcount * 999 // 1000),
3265 ("99.99%", resultcount * 9999 // 10000),
3273 ("99.99%", resultcount * 9999 // 10000),
3266 ("99.999%", resultcount * 99999 // 100000),
3274 ("99.999%", resultcount * 99999 // 100000),
3267 ("max", -1),
3275 ("max", -1),
3268 ]
3276 ]
3269 if not ui.quiet:
3277 if not ui.quiet:
3270 for name, idx in relevants:
3278 for name, idx in relevants:
3271 data = results[idx]
3279 data = results[idx]
3272 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3280 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3273 formatone(fm, data[1], title=title, displayall=displayall)
3281 formatone(fm, data[1], title=title, displayall=displayall)
3274
3282
3275 # XXX summing that many floats will not be very precise, we ignore this fact
3283 # XXX summing that many floats will not be very precise, we ignore this fact
3276 # for now
3284 # for now
3277 totaltime = []
3285 totaltime = []
3278 for item in allresults:
3286 for item in allresults:
3279 totaltime.append(
3287 totaltime.append(
3280 (
3288 (
3281 sum(x[1][0] for x in item),
3289 sum(x[1][0] for x in item),
3282 sum(x[1][1] for x in item),
3290 sum(x[1][1] for x in item),
3283 sum(x[1][2] for x in item),
3291 sum(x[1][2] for x in item),
3284 )
3292 )
3285 )
3293 )
3286 formatone(
3294 formatone(
3287 fm,
3295 fm,
3288 totaltime,
3296 totaltime,
3289 title="total time (%d revs)" % resultcount,
3297 title="total time (%d revs)" % resultcount,
3290 displayall=displayall,
3298 displayall=displayall,
3291 )
3299 )
3292 fm.end()
3300 fm.end()
3293
3301
3294
3302
3295 class _faketr:
3303 class _faketr:
3296 def add(s, x, y, z=None):
3304 def add(s, x, y, z=None):
3297 return None
3305 return None
3298
3306
3299
3307
3300 def _timeonewrite(
3308 def _timeonewrite(
3301 ui,
3309 ui,
3302 orig,
3310 orig,
3303 source,
3311 source,
3304 startrev,
3312 startrev,
3305 stoprev,
3313 stoprev,
3306 runidx=None,
3314 runidx=None,
3307 lazydeltabase=True,
3315 lazydeltabase=True,
3308 clearcaches=True,
3316 clearcaches=True,
3309 ):
3317 ):
3310 timings = []
3318 timings = []
3311 tr = _faketr()
3319 tr = _faketr()
3312 with _temprevlog(ui, orig, startrev) as dest:
3320 with _temprevlog(ui, orig, startrev) as dest:
3313 dest._lazydeltabase = lazydeltabase
3321 dest._lazydeltabase = lazydeltabase
3314 revs = list(orig.revs(startrev, stoprev))
3322 revs = list(orig.revs(startrev, stoprev))
3315 total = len(revs)
3323 total = len(revs)
3316 topic = 'adding'
3324 topic = 'adding'
3317 if runidx is not None:
3325 if runidx is not None:
3318 topic += ' (run #%d)' % runidx
3326 topic += ' (run #%d)' % runidx
3319 # Support both old and new progress API
3327 # Support both old and new progress API
3320 if util.safehasattr(ui, 'makeprogress'):
3328 if util.safehasattr(ui, 'makeprogress'):
3321 progress = ui.makeprogress(topic, unit='revs', total=total)
3329 progress = ui.makeprogress(topic, unit='revs', total=total)
3322
3330
3323 def updateprogress(pos):
3331 def updateprogress(pos):
3324 progress.update(pos)
3332 progress.update(pos)
3325
3333
3326 def completeprogress():
3334 def completeprogress():
3327 progress.complete()
3335 progress.complete()
3328
3336
3329 else:
3337 else:
3330
3338
3331 def updateprogress(pos):
3339 def updateprogress(pos):
3332 ui.progress(topic, pos, unit='revs', total=total)
3340 ui.progress(topic, pos, unit='revs', total=total)
3333
3341
3334 def completeprogress():
3342 def completeprogress():
3335 ui.progress(topic, None, unit='revs', total=total)
3343 ui.progress(topic, None, unit='revs', total=total)
3336
3344
3337 for idx, rev in enumerate(revs):
3345 for idx, rev in enumerate(revs):
3338 updateprogress(idx)
3346 updateprogress(idx)
3339 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3347 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3340 if clearcaches:
3348 if clearcaches:
3341 dest.index.clearcaches()
3349 dest.index.clearcaches()
3342 dest.clearcaches()
3350 dest.clearcaches()
3343 with timeone() as r:
3351 with timeone() as r:
3344 dest.addrawrevision(*addargs, **addkwargs)
3352 dest.addrawrevision(*addargs, **addkwargs)
3345 timings.append((rev, r[0]))
3353 timings.append((rev, r[0]))
3346 updateprogress(total)
3354 updateprogress(total)
3347 completeprogress()
3355 completeprogress()
3348 return timings
3356 return timings
3349
3357
3350
3358
3351 def _getrevisionseed(orig, rev, tr, source):
3359 def _getrevisionseed(orig, rev, tr, source):
3352 from mercurial.node import nullid
3360 from mercurial.node import nullid
3353
3361
3354 linkrev = orig.linkrev(rev)
3362 linkrev = orig.linkrev(rev)
3355 node = orig.node(rev)
3363 node = orig.node(rev)
3356 p1, p2 = orig.parents(node)
3364 p1, p2 = orig.parents(node)
3357 flags = orig.flags(rev)
3365 flags = orig.flags(rev)
3358 cachedelta = None
3366 cachedelta = None
3359 text = None
3367 text = None
3360
3368
3361 if source == b'full':
3369 if source == b'full':
3362 text = orig.revision(rev)
3370 text = orig.revision(rev)
3363 elif source == b'parent-1':
3371 elif source == b'parent-1':
3364 baserev = orig.rev(p1)
3372 baserev = orig.rev(p1)
3365 cachedelta = (baserev, orig.revdiff(p1, rev))
3373 cachedelta = (baserev, orig.revdiff(p1, rev))
3366 elif source == b'parent-2':
3374 elif source == b'parent-2':
3367 parent = p2
3375 parent = p2
3368 if p2 == nullid:
3376 if p2 == nullid:
3369 parent = p1
3377 parent = p1
3370 baserev = orig.rev(parent)
3378 baserev = orig.rev(parent)
3371 cachedelta = (baserev, orig.revdiff(parent, rev))
3379 cachedelta = (baserev, orig.revdiff(parent, rev))
3372 elif source == b'parent-smallest':
3380 elif source == b'parent-smallest':
3373 p1diff = orig.revdiff(p1, rev)
3381 p1diff = orig.revdiff(p1, rev)
3374 parent = p1
3382 parent = p1
3375 diff = p1diff
3383 diff = p1diff
3376 if p2 != nullid:
3384 if p2 != nullid:
3377 p2diff = orig.revdiff(p2, rev)
3385 p2diff = orig.revdiff(p2, rev)
3378 if len(p1diff) > len(p2diff):
3386 if len(p1diff) > len(p2diff):
3379 parent = p2
3387 parent = p2
3380 diff = p2diff
3388 diff = p2diff
3381 baserev = orig.rev(parent)
3389 baserev = orig.rev(parent)
3382 cachedelta = (baserev, diff)
3390 cachedelta = (baserev, diff)
3383 elif source == b'storage':
3391 elif source == b'storage':
3384 baserev = orig.deltaparent(rev)
3392 baserev = orig.deltaparent(rev)
3385 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3393 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3386
3394
3387 return (
3395 return (
3388 (text, tr, linkrev, p1, p2),
3396 (text, tr, linkrev, p1, p2),
3389 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3397 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3390 )
3398 )
3391
3399
3392
3400
3393 @contextlib.contextmanager
3401 @contextlib.contextmanager
3394 def _temprevlog(ui, orig, truncaterev):
3402 def _temprevlog(ui, orig, truncaterev):
3395 from mercurial import vfs as vfsmod
3403 from mercurial import vfs as vfsmod
3396
3404
3397 if orig._inline:
3405 if orig._inline:
3398 raise error.Abort('not supporting inline revlog (yet)')
3406 raise error.Abort('not supporting inline revlog (yet)')
3399 revlogkwargs = {}
3407 revlogkwargs = {}
3400 k = 'upperboundcomp'
3408 k = 'upperboundcomp'
3401 if util.safehasattr(orig, k):
3409 if util.safehasattr(orig, k):
3402 revlogkwargs[k] = getattr(orig, k)
3410 revlogkwargs[k] = getattr(orig, k)
3403
3411
3404 indexfile = getattr(orig, '_indexfile', None)
3412 indexfile = getattr(orig, '_indexfile', None)
3405 if indexfile is None:
3413 if indexfile is None:
3406 # compatibility with <= hg-5.8
3414 # compatibility with <= hg-5.8
3407 indexfile = getattr(orig, 'indexfile')
3415 indexfile = getattr(orig, 'indexfile')
3408 origindexpath = orig.opener.join(indexfile)
3416 origindexpath = orig.opener.join(indexfile)
3409
3417
3410 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3418 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3411 origdatapath = orig.opener.join(datafile)
3419 origdatapath = orig.opener.join(datafile)
3412 radix = b'revlog'
3420 radix = b'revlog'
3413 indexname = b'revlog.i'
3421 indexname = b'revlog.i'
3414 dataname = b'revlog.d'
3422 dataname = b'revlog.d'
3415
3423
3416 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3424 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3417 try:
3425 try:
3418 # copy the data file in a temporary directory
3426 # copy the data file in a temporary directory
3419 ui.debug('copying data in %s\n' % tmpdir)
3427 ui.debug('copying data in %s\n' % tmpdir)
3420 destindexpath = os.path.join(tmpdir, 'revlog.i')
3428 destindexpath = os.path.join(tmpdir, 'revlog.i')
3421 destdatapath = os.path.join(tmpdir, 'revlog.d')
3429 destdatapath = os.path.join(tmpdir, 'revlog.d')
3422 shutil.copyfile(origindexpath, destindexpath)
3430 shutil.copyfile(origindexpath, destindexpath)
3423 shutil.copyfile(origdatapath, destdatapath)
3431 shutil.copyfile(origdatapath, destdatapath)
3424
3432
3425 # remove the data we want to add again
3433 # remove the data we want to add again
3426 ui.debug('truncating data to be rewritten\n')
3434 ui.debug('truncating data to be rewritten\n')
3427 with open(destindexpath, 'ab') as index:
3435 with open(destindexpath, 'ab') as index:
3428 index.seek(0)
3436 index.seek(0)
3429 index.truncate(truncaterev * orig._io.size)
3437 index.truncate(truncaterev * orig._io.size)
3430 with open(destdatapath, 'ab') as data:
3438 with open(destdatapath, 'ab') as data:
3431 data.seek(0)
3439 data.seek(0)
3432 data.truncate(orig.start(truncaterev))
3440 data.truncate(orig.start(truncaterev))
3433
3441
3434 # instantiate a new revlog from the temporary copy
3442 # instantiate a new revlog from the temporary copy
3435 ui.debug('instantiating revlog from the temporary copy\n')
3443 ui.debug('instantiating revlog from the temporary copy\n')
3436 vfs = vfsmod.vfs(tmpdir)
3444 vfs = vfsmod.vfs(tmpdir)
3437 vfs.options = getattr(orig.opener, 'options', None)
3445 vfs.options = getattr(orig.opener, 'options', None)
3438
3446
3439 try:
3447 try:
3440 dest = revlog(vfs, radix=radix, **revlogkwargs)
3448 dest = revlog(vfs, radix=radix, **revlogkwargs)
3441 except TypeError:
3449 except TypeError:
3442 dest = revlog(
3450 dest = revlog(
3443 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3451 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3444 )
3452 )
3445 if dest._inline:
3453 if dest._inline:
3446 raise error.Abort('not supporting inline revlog (yet)')
3454 raise error.Abort('not supporting inline revlog (yet)')
3447 # make sure internals are initialized
3455 # make sure internals are initialized
3448 dest.revision(len(dest) - 1)
3456 dest.revision(len(dest) - 1)
3449 yield dest
3457 yield dest
3450 del dest, vfs
3458 del dest, vfs
3451 finally:
3459 finally:
3452 shutil.rmtree(tmpdir, True)
3460 shutil.rmtree(tmpdir, True)
3453
3461
3454
3462
3455 @command(
3463 @command(
3456 b'perf::revlogchunks|perfrevlogchunks',
3464 b'perf::revlogchunks|perfrevlogchunks',
3457 revlogopts
3465 revlogopts
3458 + formatteropts
3466 + formatteropts
3459 + [
3467 + [
3460 (b'e', b'engines', b'', b'compression engines to use'),
3468 (b'e', b'engines', b'', b'compression engines to use'),
3461 (b's', b'startrev', 0, b'revision to start at'),
3469 (b's', b'startrev', 0, b'revision to start at'),
3462 ],
3470 ],
3463 b'-c|-m|FILE',
3471 b'-c|-m|FILE',
3464 )
3472 )
3465 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3473 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3466 """Benchmark operations on revlog chunks.
3474 """Benchmark operations on revlog chunks.
3467
3475
3468 Logically, each revlog is a collection of fulltext revisions. However,
3476 Logically, each revlog is a collection of fulltext revisions. However,
3469 stored within each revlog are "chunks" of possibly compressed data. This
3477 stored within each revlog are "chunks" of possibly compressed data. This
3470 data needs to be read and decompressed or compressed and written.
3478 data needs to be read and decompressed or compressed and written.
3471
3479
3472 This command measures the time it takes to read+decompress and recompress
3480 This command measures the time it takes to read+decompress and recompress
3473 chunks in a revlog. It effectively isolates I/O and compression performance.
3481 chunks in a revlog. It effectively isolates I/O and compression performance.
3474 For measurements of higher-level operations like resolving revisions,
3482 For measurements of higher-level operations like resolving revisions,
3475 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3483 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3476 """
3484 """
3477 opts = _byteskwargs(opts)
3485 opts = _byteskwargs(opts)
3478
3486
3479 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3487 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3480
3488
3481 # _chunkraw was renamed to _getsegmentforrevs.
3489 # _chunkraw was renamed to _getsegmentforrevs.
3482 try:
3490 try:
3483 segmentforrevs = rl._getsegmentforrevs
3491 segmentforrevs = rl._getsegmentforrevs
3484 except AttributeError:
3492 except AttributeError:
3485 segmentforrevs = rl._chunkraw
3493 segmentforrevs = rl._chunkraw
3486
3494
3487 # Verify engines argument.
3495 # Verify engines argument.
3488 if engines:
3496 if engines:
3489 engines = {e.strip() for e in engines.split(b',')}
3497 engines = {e.strip() for e in engines.split(b',')}
3490 for engine in engines:
3498 for engine in engines:
3491 try:
3499 try:
3492 util.compressionengines[engine]
3500 util.compressionengines[engine]
3493 except KeyError:
3501 except KeyError:
3494 raise error.Abort(b'unknown compression engine: %s' % engine)
3502 raise error.Abort(b'unknown compression engine: %s' % engine)
3495 else:
3503 else:
3496 engines = []
3504 engines = []
3497 for e in util.compengines:
3505 for e in util.compengines:
3498 engine = util.compengines[e]
3506 engine = util.compengines[e]
3499 try:
3507 try:
3500 if engine.available():
3508 if engine.available():
3501 engine.revlogcompressor().compress(b'dummy')
3509 engine.revlogcompressor().compress(b'dummy')
3502 engines.append(e)
3510 engines.append(e)
3503 except NotImplementedError:
3511 except NotImplementedError:
3504 pass
3512 pass
3505
3513
3506 revs = list(rl.revs(startrev, len(rl) - 1))
3514 revs = list(rl.revs(startrev, len(rl) - 1))
3507
3515
3508 def rlfh(rl):
3516 def rlfh(rl):
3509 if rl._inline:
3517 if rl._inline:
3510 indexfile = getattr(rl, '_indexfile', None)
3518 indexfile = getattr(rl, '_indexfile', None)
3511 if indexfile is None:
3519 if indexfile is None:
3512 # compatibility with <= hg-5.8
3520 # compatibility with <= hg-5.8
3513 indexfile = getattr(rl, 'indexfile')
3521 indexfile = getattr(rl, 'indexfile')
3514 return getsvfs(repo)(indexfile)
3522 return getsvfs(repo)(indexfile)
3515 else:
3523 else:
3516 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3524 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3517 return getsvfs(repo)(datafile)
3525 return getsvfs(repo)(datafile)
3518
3526
3519 def doread():
3527 def doread():
3520 rl.clearcaches()
3528 rl.clearcaches()
3521 for rev in revs:
3529 for rev in revs:
3522 segmentforrevs(rev, rev)
3530 segmentforrevs(rev, rev)
3523
3531
3524 def doreadcachedfh():
3532 def doreadcachedfh():
3525 rl.clearcaches()
3533 rl.clearcaches()
3526 fh = rlfh(rl)
3534 fh = rlfh(rl)
3527 for rev in revs:
3535 for rev in revs:
3528 segmentforrevs(rev, rev, df=fh)
3536 segmentforrevs(rev, rev, df=fh)
3529
3537
3530 def doreadbatch():
3538 def doreadbatch():
3531 rl.clearcaches()
3539 rl.clearcaches()
3532 segmentforrevs(revs[0], revs[-1])
3540 segmentforrevs(revs[0], revs[-1])
3533
3541
3534 def doreadbatchcachedfh():
3542 def doreadbatchcachedfh():
3535 rl.clearcaches()
3543 rl.clearcaches()
3536 fh = rlfh(rl)
3544 fh = rlfh(rl)
3537 segmentforrevs(revs[0], revs[-1], df=fh)
3545 segmentforrevs(revs[0], revs[-1], df=fh)
3538
3546
3539 def dochunk():
3547 def dochunk():
3540 rl.clearcaches()
3548 rl.clearcaches()
3541 fh = rlfh(rl)
3549 fh = rlfh(rl)
3542 for rev in revs:
3550 for rev in revs:
3543 rl._chunk(rev, df=fh)
3551 rl._chunk(rev, df=fh)
3544
3552
3545 chunks = [None]
3553 chunks = [None]
3546
3554
3547 def dochunkbatch():
3555 def dochunkbatch():
3548 rl.clearcaches()
3556 rl.clearcaches()
3549 fh = rlfh(rl)
3557 fh = rlfh(rl)
3550 # Save chunks as a side-effect.
3558 # Save chunks as a side-effect.
3551 chunks[0] = rl._chunks(revs, df=fh)
3559 chunks[0] = rl._chunks(revs, df=fh)
3552
3560
3553 def docompress(compressor):
3561 def docompress(compressor):
3554 rl.clearcaches()
3562 rl.clearcaches()
3555
3563
3556 try:
3564 try:
3557 # Swap in the requested compression engine.
3565 # Swap in the requested compression engine.
3558 oldcompressor = rl._compressor
3566 oldcompressor = rl._compressor
3559 rl._compressor = compressor
3567 rl._compressor = compressor
3560 for chunk in chunks[0]:
3568 for chunk in chunks[0]:
3561 rl.compress(chunk)
3569 rl.compress(chunk)
3562 finally:
3570 finally:
3563 rl._compressor = oldcompressor
3571 rl._compressor = oldcompressor
3564
3572
3565 benches = [
3573 benches = [
3566 (lambda: doread(), b'read'),
3574 (lambda: doread(), b'read'),
3567 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3575 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3568 (lambda: doreadbatch(), b'read batch'),
3576 (lambda: doreadbatch(), b'read batch'),
3569 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3577 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3570 (lambda: dochunk(), b'chunk'),
3578 (lambda: dochunk(), b'chunk'),
3571 (lambda: dochunkbatch(), b'chunk batch'),
3579 (lambda: dochunkbatch(), b'chunk batch'),
3572 ]
3580 ]
3573
3581
3574 for engine in sorted(engines):
3582 for engine in sorted(engines):
3575 compressor = util.compengines[engine].revlogcompressor()
3583 compressor = util.compengines[engine].revlogcompressor()
3576 benches.append(
3584 benches.append(
3577 (
3585 (
3578 functools.partial(docompress, compressor),
3586 functools.partial(docompress, compressor),
3579 b'compress w/ %s' % engine,
3587 b'compress w/ %s' % engine,
3580 )
3588 )
3581 )
3589 )
3582
3590
3583 for fn, title in benches:
3591 for fn, title in benches:
3584 timer, fm = gettimer(ui, opts)
3592 timer, fm = gettimer(ui, opts)
3585 timer(fn, title=title)
3593 timer(fn, title=title)
3586 fm.end()
3594 fm.end()
3587
3595
3588
3596
3589 @command(
3597 @command(
3590 b'perf::revlogrevision|perfrevlogrevision',
3598 b'perf::revlogrevision|perfrevlogrevision',
3591 revlogopts
3599 revlogopts
3592 + formatteropts
3600 + formatteropts
3593 + [(b'', b'cache', False, b'use caches instead of clearing')],
3601 + [(b'', b'cache', False, b'use caches instead of clearing')],
3594 b'-c|-m|FILE REV',
3602 b'-c|-m|FILE REV',
3595 )
3603 )
3596 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3604 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3597 """Benchmark obtaining a revlog revision.
3605 """Benchmark obtaining a revlog revision.
3598
3606
3599 Obtaining a revlog revision consists of roughly the following steps:
3607 Obtaining a revlog revision consists of roughly the following steps:
3600
3608
3601 1. Compute the delta chain
3609 1. Compute the delta chain
3602 2. Slice the delta chain if applicable
3610 2. Slice the delta chain if applicable
3603 3. Obtain the raw chunks for that delta chain
3611 3. Obtain the raw chunks for that delta chain
3604 4. Decompress each raw chunk
3612 4. Decompress each raw chunk
3605 5. Apply binary patches to obtain fulltext
3613 5. Apply binary patches to obtain fulltext
3606 6. Verify hash of fulltext
3614 6. Verify hash of fulltext
3607
3615
3608 This command measures the time spent in each of these phases.
3616 This command measures the time spent in each of these phases.
3609 """
3617 """
3610 opts = _byteskwargs(opts)
3618 opts = _byteskwargs(opts)
3611
3619
3612 if opts.get(b'changelog') or opts.get(b'manifest'):
3620 if opts.get(b'changelog') or opts.get(b'manifest'):
3613 file_, rev = None, file_
3621 file_, rev = None, file_
3614 elif rev is None:
3622 elif rev is None:
3615 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3623 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3616
3624
3617 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3625 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3618
3626
3619 # _chunkraw was renamed to _getsegmentforrevs.
3627 # _chunkraw was renamed to _getsegmentforrevs.
3620 try:
3628 try:
3621 segmentforrevs = r._getsegmentforrevs
3629 segmentforrevs = r._getsegmentforrevs
3622 except AttributeError:
3630 except AttributeError:
3623 segmentforrevs = r._chunkraw
3631 segmentforrevs = r._chunkraw
3624
3632
3625 node = r.lookup(rev)
3633 node = r.lookup(rev)
3626 rev = r.rev(node)
3634 rev = r.rev(node)
3627
3635
3628 def getrawchunks(data, chain):
3636 def getrawchunks(data, chain):
3629 start = r.start
3637 start = r.start
3630 length = r.length
3638 length = r.length
3631 inline = r._inline
3639 inline = r._inline
3632 try:
3640 try:
3633 iosize = r.index.entry_size
3641 iosize = r.index.entry_size
3634 except AttributeError:
3642 except AttributeError:
3635 iosize = r._io.size
3643 iosize = r._io.size
3636 buffer = util.buffer
3644 buffer = util.buffer
3637
3645
3638 chunks = []
3646 chunks = []
3639 ladd = chunks.append
3647 ladd = chunks.append
3640 for idx, item in enumerate(chain):
3648 for idx, item in enumerate(chain):
3641 offset = start(item[0])
3649 offset = start(item[0])
3642 bits = data[idx]
3650 bits = data[idx]
3643 for rev in item:
3651 for rev in item:
3644 chunkstart = start(rev)
3652 chunkstart = start(rev)
3645 if inline:
3653 if inline:
3646 chunkstart += (rev + 1) * iosize
3654 chunkstart += (rev + 1) * iosize
3647 chunklength = length(rev)
3655 chunklength = length(rev)
3648 ladd(buffer(bits, chunkstart - offset, chunklength))
3656 ladd(buffer(bits, chunkstart - offset, chunklength))
3649
3657
3650 return chunks
3658 return chunks
3651
3659
3652 def dodeltachain(rev):
3660 def dodeltachain(rev):
3653 if not cache:
3661 if not cache:
3654 r.clearcaches()
3662 r.clearcaches()
3655 r._deltachain(rev)
3663 r._deltachain(rev)
3656
3664
3657 def doread(chain):
3665 def doread(chain):
3658 if not cache:
3666 if not cache:
3659 r.clearcaches()
3667 r.clearcaches()
3660 for item in slicedchain:
3668 for item in slicedchain:
3661 segmentforrevs(item[0], item[-1])
3669 segmentforrevs(item[0], item[-1])
3662
3670
3663 def doslice(r, chain, size):
3671 def doslice(r, chain, size):
3664 for s in slicechunk(r, chain, targetsize=size):
3672 for s in slicechunk(r, chain, targetsize=size):
3665 pass
3673 pass
3666
3674
3667 def dorawchunks(data, chain):
3675 def dorawchunks(data, chain):
3668 if not cache:
3676 if not cache:
3669 r.clearcaches()
3677 r.clearcaches()
3670 getrawchunks(data, chain)
3678 getrawchunks(data, chain)
3671
3679
3672 def dodecompress(chunks):
3680 def dodecompress(chunks):
3673 decomp = r.decompress
3681 decomp = r.decompress
3674 for chunk in chunks:
3682 for chunk in chunks:
3675 decomp(chunk)
3683 decomp(chunk)
3676
3684
3677 def dopatch(text, bins):
3685 def dopatch(text, bins):
3678 if not cache:
3686 if not cache:
3679 r.clearcaches()
3687 r.clearcaches()
3680 mdiff.patches(text, bins)
3688 mdiff.patches(text, bins)
3681
3689
3682 def dohash(text):
3690 def dohash(text):
3683 if not cache:
3691 if not cache:
3684 r.clearcaches()
3692 r.clearcaches()
3685 r.checkhash(text, node, rev=rev)
3693 r.checkhash(text, node, rev=rev)
3686
3694
3687 def dorevision():
3695 def dorevision():
3688 if not cache:
3696 if not cache:
3689 r.clearcaches()
3697 r.clearcaches()
3690 r.revision(node)
3698 r.revision(node)
3691
3699
3692 try:
3700 try:
3693 from mercurial.revlogutils.deltas import slicechunk
3701 from mercurial.revlogutils.deltas import slicechunk
3694 except ImportError:
3702 except ImportError:
3695 slicechunk = getattr(revlog, '_slicechunk', None)
3703 slicechunk = getattr(revlog, '_slicechunk', None)
3696
3704
3697 size = r.length(rev)
3705 size = r.length(rev)
3698 chain = r._deltachain(rev)[0]
3706 chain = r._deltachain(rev)[0]
3699 if not getattr(r, '_withsparseread', False):
3707 if not getattr(r, '_withsparseread', False):
3700 slicedchain = (chain,)
3708 slicedchain = (chain,)
3701 else:
3709 else:
3702 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3710 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3703 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3711 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3704 rawchunks = getrawchunks(data, slicedchain)
3712 rawchunks = getrawchunks(data, slicedchain)
3705 bins = r._chunks(chain)
3713 bins = r._chunks(chain)
3706 text = bytes(bins[0])
3714 text = bytes(bins[0])
3707 bins = bins[1:]
3715 bins = bins[1:]
3708 text = mdiff.patches(text, bins)
3716 text = mdiff.patches(text, bins)
3709
3717
3710 benches = [
3718 benches = [
3711 (lambda: dorevision(), b'full'),
3719 (lambda: dorevision(), b'full'),
3712 (lambda: dodeltachain(rev), b'deltachain'),
3720 (lambda: dodeltachain(rev), b'deltachain'),
3713 (lambda: doread(chain), b'read'),
3721 (lambda: doread(chain), b'read'),
3714 ]
3722 ]
3715
3723
3716 if getattr(r, '_withsparseread', False):
3724 if getattr(r, '_withsparseread', False):
3717 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3725 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3718 benches.append(slicing)
3726 benches.append(slicing)
3719
3727
3720 benches.extend(
3728 benches.extend(
3721 [
3729 [
3722 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3730 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3723 (lambda: dodecompress(rawchunks), b'decompress'),
3731 (lambda: dodecompress(rawchunks), b'decompress'),
3724 (lambda: dopatch(text, bins), b'patch'),
3732 (lambda: dopatch(text, bins), b'patch'),
3725 (lambda: dohash(text), b'hash'),
3733 (lambda: dohash(text), b'hash'),
3726 ]
3734 ]
3727 )
3735 )
3728
3736
3729 timer, fm = gettimer(ui, opts)
3737 timer, fm = gettimer(ui, opts)
3730 for fn, title in benches:
3738 for fn, title in benches:
3731 timer(fn, title=title)
3739 timer(fn, title=title)
3732 fm.end()
3740 fm.end()
3733
3741
3734
3742
3735 @command(
3743 @command(
3736 b'perf::revset|perfrevset',
3744 b'perf::revset|perfrevset',
3737 [
3745 [
3738 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3746 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3739 (b'', b'contexts', False, b'obtain changectx for each revision'),
3747 (b'', b'contexts', False, b'obtain changectx for each revision'),
3740 ]
3748 ]
3741 + formatteropts,
3749 + formatteropts,
3742 b"REVSET",
3750 b"REVSET",
3743 )
3751 )
3744 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3752 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3745 """benchmark the execution time of a revset
3753 """benchmark the execution time of a revset
3746
3754
3747 Use the --clear option if you need to evaluate the impact of building the
3755 Use the --clear option if you need to evaluate the impact of building the
3748 volatile revision set caches on revset execution. Volatile caches hold
3756 volatile revision set caches on revset execution. Volatile caches hold
3749 filtering and obsolescence related data."""
3757 filtering and obsolescence related data."""
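# A minimal usage sketch, not part of the original source: the revset
# expressions are arbitrary examples.
#
#   $ hg perf::revset 'draft()'
#   $ hg perf::revset --clear --contexts 'heads(all())'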
3750 opts = _byteskwargs(opts)
3758 opts = _byteskwargs(opts)
3751
3759
3752 timer, fm = gettimer(ui, opts)
3760 timer, fm = gettimer(ui, opts)
3753
3761
3754 def d():
3762 def d():
3755 if clear:
3763 if clear:
3756 repo.invalidatevolatilesets()
3764 repo.invalidatevolatilesets()
3757 if contexts:
3765 if contexts:
3758 for ctx in repo.set(expr):
3766 for ctx in repo.set(expr):
3759 pass
3767 pass
3760 else:
3768 else:
3761 for r in repo.revs(expr):
3769 for r in repo.revs(expr):
3762 pass
3770 pass
3763
3771
3764 timer(d)
3772 timer(d)
3765 fm.end()
3773 fm.end()
3766
3774
3767
3775
3768 @command(
3776 @command(
3769 b'perf::volatilesets|perfvolatilesets',
3777 b'perf::volatilesets|perfvolatilesets',
3770 [
3778 [
3771 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3779 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3772 ]
3780 ]
3773 + formatteropts,
3781 + formatteropts,
3774 )
3782 )
3775 def perfvolatilesets(ui, repo, *names, **opts):
3783 def perfvolatilesets(ui, repo, *names, **opts):
3776 """benchmark the computation of various volatile set
3784 """benchmark the computation of various volatile set
3777
3785
3778 Volatile set computes element related to filtering and obsolescence."""
3786 Volatile set computes element related to filtering and obsolescence."""
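# A minimal usage sketch, not part of the original source: with no names every
# known volatile set is measured; 'obsolete' is assumed to be one of the
# available set names.
#
#   $ hg perf::volatilesets
#   $ hg perf::volatilesets --clear-obsstore obsolete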
3779 opts = _byteskwargs(opts)
3787 opts = _byteskwargs(opts)
3780 timer, fm = gettimer(ui, opts)
3788 timer, fm = gettimer(ui, opts)
3781 repo = repo.unfiltered()
3789 repo = repo.unfiltered()
3782
3790
3783 def getobs(name):
3791 def getobs(name):
3784 def d():
3792 def d():
3785 repo.invalidatevolatilesets()
3793 repo.invalidatevolatilesets()
3786 if opts[b'clear_obsstore']:
3794 if opts[b'clear_obsstore']:
3787 clearfilecache(repo, b'obsstore')
3795 clearfilecache(repo, b'obsstore')
3788 obsolete.getrevs(repo, name)
3796 obsolete.getrevs(repo, name)
3789
3797
3790 return d
3798 return d
3791
3799
3792 allobs = sorted(obsolete.cachefuncs)
3800 allobs = sorted(obsolete.cachefuncs)
3793 if names:
3801 if names:
3794 allobs = [n for n in allobs if n in names]
3802 allobs = [n for n in allobs if n in names]
3795
3803
3796 for name in allobs:
3804 for name in allobs:
3797 timer(getobs(name), title=name)
3805 timer(getobs(name), title=name)
3798
3806
3799 def getfiltered(name):
3807 def getfiltered(name):
3800 def d():
3808 def d():
3801 repo.invalidatevolatilesets()
3809 repo.invalidatevolatilesets()
3802 if opts[b'clear_obsstore']:
3810 if opts[b'clear_obsstore']:
3803 clearfilecache(repo, b'obsstore')
3811 clearfilecache(repo, b'obsstore')
3804 repoview.filterrevs(repo, name)
3812 repoview.filterrevs(repo, name)
3805
3813
3806 return d
3814 return d
3807
3815
3808 allfilter = sorted(repoview.filtertable)
3816 allfilter = sorted(repoview.filtertable)
3809 if names:
3817 if names:
3810 allfilter = [n for n in allfilter if n in names]
3818 allfilter = [n for n in allfilter if n in names]
3811
3819
3812 for name in allfilter:
3820 for name in allfilter:
3813 timer(getfiltered(name), title=name)
3821 timer(getfiltered(name), title=name)
3814 fm.end()
3822 fm.end()
3815
3823
3816
3824
3817 @command(
3825 @command(
3818 b'perf::branchmap|perfbranchmap',
3826 b'perf::branchmap|perfbranchmap',
3819 [
3827 [
3820 (b'f', b'full', False, b'Includes build time of subset'),
3828 (b'f', b'full', False, b'Includes build time of subset'),
3821 (
3829 (
3822 b'',
3830 b'',
3823 b'clear-revbranch',
3831 b'clear-revbranch',
3824 False,
3832 False,
3825 b'purge the revbranch cache between computation',
3833 b'purge the revbranch cache between computation',
3826 ),
3834 ),
3827 ]
3835 ]
3828 + formatteropts,
3836 + formatteropts,
3829 )
3837 )
3830 def perfbranchmap(ui, repo, *filternames, **opts):
3838 def perfbranchmap(ui, repo, *filternames, **opts):
3831 """benchmark the update of a branchmap
3839 """benchmark the update of a branchmap
3832
3840
3833 This benchmarks the full repo.branchmap() call with cache reading and writing disabled.
3841 This benchmarks the full repo.branchmap() call with cache reading and writing disabled.
3834 """
3842 """
3835 opts = _byteskwargs(opts)
3843 opts = _byteskwargs(opts)
3836 full = opts.get(b"full", False)
3844 full = opts.get(b"full", False)
3837 clear_revbranch = opts.get(b"clear_revbranch", False)
3845 clear_revbranch = opts.get(b"clear_revbranch", False)
3838 timer, fm = gettimer(ui, opts)
3846 timer, fm = gettimer(ui, opts)
3839
3847
3840 def getbranchmap(filtername):
3848 def getbranchmap(filtername):
3841 """generate a benchmark function for the filtername"""
3849 """generate a benchmark function for the filtername"""
3842 if filtername is None:
3850 if filtername is None:
3843 view = repo
3851 view = repo
3844 else:
3852 else:
3845 view = repo.filtered(filtername)
3853 view = repo.filtered(filtername)
3846 if util.safehasattr(view._branchcaches, '_per_filter'):
3854 if util.safehasattr(view._branchcaches, '_per_filter'):
3847 filtered = view._branchcaches._per_filter
3855 filtered = view._branchcaches._per_filter
3848 else:
3856 else:
3849 # older versions
3857 # older versions
3850 filtered = view._branchcaches
3858 filtered = view._branchcaches
3851
3859
3852 def d():
3860 def d():
3853 if clear_revbranch:
3861 if clear_revbranch:
3854 repo.revbranchcache()._clear()
3862 repo.revbranchcache()._clear()
3855 if full:
3863 if full:
3856 view._branchcaches.clear()
3864 view._branchcaches.clear()
3857 else:
3865 else:
3858 filtered.pop(filtername, None)
3866 filtered.pop(filtername, None)
3859 view.branchmap()
3867 view.branchmap()
3860
3868
3861 return d
3869 return d
3862
3870
3863 # order filters from the smaller subset to the bigger subset
3871 # order filters from the smaller subset to the bigger subset
3864 possiblefilters = set(repoview.filtertable)
3872 possiblefilters = set(repoview.filtertable)
3865 if filternames:
3873 if filternames:
3866 possiblefilters &= set(filternames)
3874 possiblefilters &= set(filternames)
3867 subsettable = getbranchmapsubsettable()
3875 subsettable = getbranchmapsubsettable()
3868 allfilters = []
3876 allfilters = []
3869 while possiblefilters:
3877 while possiblefilters:
3870 for name in possiblefilters:
3878 for name in possiblefilters:
3871 subset = subsettable.get(name)
3879 subset = subsettable.get(name)
3872 if subset not in possiblefilters:
3880 if subset not in possiblefilters:
3873 break
3881 break
3874 else:
3882 else:
3875 assert False, b'subset cycle %s!' % possiblefilters
3883 assert False, b'subset cycle %s!' % possiblefilters
3876 allfilters.append(name)
3884 allfilters.append(name)
3877 possiblefilters.remove(name)
3885 possiblefilters.remove(name)
3878
3886
3879 # warm the cache
3887 # warm the cache
3880 if not full:
3888 if not full:
3881 for name in allfilters:
3889 for name in allfilters:
3882 repo.filtered(name).branchmap()
3890 repo.filtered(name).branchmap()
3883 if not filternames or b'unfiltered' in filternames:
3891 if not filternames or b'unfiltered' in filternames:
3884 # add unfiltered
3892 # add unfiltered
3885 allfilters.append(None)
3893 allfilters.append(None)
3886
3894
3887 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3895 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3888 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3896 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3889 branchcacheread.set(classmethod(lambda *args: None))
3897 branchcacheread.set(classmethod(lambda *args: None))
3890 else:
3898 else:
3891 # older versions
3899 # older versions
3892 branchcacheread = safeattrsetter(branchmap, b'read')
3900 branchcacheread = safeattrsetter(branchmap, b'read')
3893 branchcacheread.set(lambda *args: None)
3901 branchcacheread.set(lambda *args: None)
3894 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3902 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3895 branchcachewrite.set(lambda *args: None)
3903 branchcachewrite.set(lambda *args: None)
3896 try:
3904 try:
3897 for name in allfilters:
3905 for name in allfilters:
3898 printname = name
3906 printname = name
3899 if name is None:
3907 if name is None:
3900 printname = b'unfiltered'
3908 printname = b'unfiltered'
3901 timer(getbranchmap(name), title=printname)
3909 timer(getbranchmap(name), title=printname)
3902 finally:
3910 finally:
3903 branchcacheread.restore()
3911 branchcacheread.restore()
3904 branchcachewrite.restore()
3912 branchcachewrite.restore()
3905 fm.end()
3913 fm.end()
3906
3914
3907
3915
3908 @command(
3916 @command(
3909 b'perf::branchmapupdate|perfbranchmapupdate',
3917 b'perf::branchmapupdate|perfbranchmapupdate',
3910 [
3918 [
3911 (b'', b'base', [], b'subset of revisions to start from'),
3919 (b'', b'base', [], b'subset of revisions to start from'),
3912 (b'', b'target', [], b'subset of revisions to end with'),
3920 (b'', b'target', [], b'subset of revisions to end with'),
3913 (b'', b'clear-caches', False, b'clear caches between each run'),
3921 (b'', b'clear-caches', False, b'clear caches between each run'),
3914 ]
3922 ]
3915 + formatteropts,
3923 + formatteropts,
3916 )
3924 )
3917 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3925 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3918 """benchmark branchmap update from for <base> revs to <target> revs
3926 """benchmark branchmap update from for <base> revs to <target> revs
3919
3927
3920 If `--clear-caches` is passed, the following items will be reset before
3928 If `--clear-caches` is passed, the following items will be reset before
3921 each update:
3929 each update:
3922 * the changelog instance and associated indexes
3930 * the changelog instance and associated indexes
3923 * the rev-branch-cache instance
3931 * the rev-branch-cache instance
3924
3932
3925 Examples:
3933 Examples:
3926
3934
3927 # update for the last revision only
3935 # update for the last revision only
3928 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3936 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3929
3937
3930 # update for changes coming with a new branch
3938 # update for changes coming with a new branch
3931 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3939 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3932 """
3940 """
3933 from mercurial import branchmap
3941 from mercurial import branchmap
3934 from mercurial import repoview
3942 from mercurial import repoview
3935
3943
3936 opts = _byteskwargs(opts)
3944 opts = _byteskwargs(opts)
3937 timer, fm = gettimer(ui, opts)
3945 timer, fm = gettimer(ui, opts)
3938 clearcaches = opts[b'clear_caches']
3946 clearcaches = opts[b'clear_caches']
3939 unfi = repo.unfiltered()
3947 unfi = repo.unfiltered()
3940 x = [None] # used to pass data between closures
3948 x = [None] # used to pass data between closures
3941
3949
3942 # we use a `list` here to avoid possible side effect from smartset
3950 # we use a `list` here to avoid possible side effect from smartset
3943 baserevs = list(scmutil.revrange(repo, base))
3951 baserevs = list(scmutil.revrange(repo, base))
3944 targetrevs = list(scmutil.revrange(repo, target))
3952 targetrevs = list(scmutil.revrange(repo, target))
3945 if not baserevs:
3953 if not baserevs:
3946 raise error.Abort(b'no revisions selected for --base')
3954 raise error.Abort(b'no revisions selected for --base')
3947 if not targetrevs:
3955 if not targetrevs:
3948 raise error.Abort(b'no revisions selected for --target')
3956 raise error.Abort(b'no revisions selected for --target')
3949
3957
3950 # make sure the target branchmap also contains the one in the base
3958 # make sure the target branchmap also contains the one in the base
3951 targetrevs = list(set(baserevs) | set(targetrevs))
3959 targetrevs = list(set(baserevs) | set(targetrevs))
3952 targetrevs.sort()
3960 targetrevs.sort()
3953
3961
3954 cl = repo.changelog
3962 cl = repo.changelog
3955 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3963 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3956 allbaserevs.sort()
3964 allbaserevs.sort()
3957 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3965 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3958
3966
3959 newrevs = list(alltargetrevs.difference(allbaserevs))
3967 newrevs = list(alltargetrevs.difference(allbaserevs))
3960 newrevs.sort()
3968 newrevs.sort()
3961
3969
3962 allrevs = frozenset(unfi.changelog.revs())
3970 allrevs = frozenset(unfi.changelog.revs())
3963 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3971 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3964 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3972 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3965
3973
3966 def basefilter(repo, visibilityexceptions=None):
3974 def basefilter(repo, visibilityexceptions=None):
3967 return basefilterrevs
3975 return basefilterrevs
3968
3976
3969 def targetfilter(repo, visibilityexceptions=None):
3977 def targetfilter(repo, visibilityexceptions=None):
3970 return targetfilterrevs
3978 return targetfilterrevs
3971
3979
3972 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3980 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3973 ui.status(msg % (len(allbaserevs), len(newrevs)))
3981 ui.status(msg % (len(allbaserevs), len(newrevs)))
3974 if targetfilterrevs:
3982 if targetfilterrevs:
3975 msg = b'(%d revisions still filtered)\n'
3983 msg = b'(%d revisions still filtered)\n'
3976 ui.status(msg % len(targetfilterrevs))
3984 ui.status(msg % len(targetfilterrevs))
3977
3985
3978 try:
3986 try:
3979 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3987 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3980 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3988 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3981
3989
3982 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3990 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3983 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3991 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3984
3992
3985 # try to find an existing branchmap to reuse
3993 # try to find an existing branchmap to reuse
3986 subsettable = getbranchmapsubsettable()
3994 subsettable = getbranchmapsubsettable()
3987 candidatefilter = subsettable.get(None)
3995 candidatefilter = subsettable.get(None)
3988 while candidatefilter is not None:
3996 while candidatefilter is not None:
3989 candidatebm = repo.filtered(candidatefilter).branchmap()
3997 candidatebm = repo.filtered(candidatefilter).branchmap()
3990 if candidatebm.validfor(baserepo):
3998 if candidatebm.validfor(baserepo):
3991 filtered = repoview.filterrevs(repo, candidatefilter)
3999 filtered = repoview.filterrevs(repo, candidatefilter)
3992 missing = [r for r in allbaserevs if r in filtered]
4000 missing = [r for r in allbaserevs if r in filtered]
3993 base = candidatebm.copy()
4001 base = candidatebm.copy()
3994 base.update(baserepo, missing)
4002 base.update(baserepo, missing)
3995 break
4003 break
3996 candidatefilter = subsettable.get(candidatefilter)
4004 candidatefilter = subsettable.get(candidatefilter)
3997 else:
4005 else:
3998 # no suitable subset was found
4006 # no suitable subset was found
3999 base = branchmap.branchcache()
4007 base = branchmap.branchcache()
4000 base.update(baserepo, allbaserevs)
4008 base.update(baserepo, allbaserevs)
4001
4009
4002 def setup():
4010 def setup():
4003 x[0] = base.copy()
4011 x[0] = base.copy()
4004 if clearcaches:
4012 if clearcaches:
4005 unfi._revbranchcache = None
4013 unfi._revbranchcache = None
4006 clearchangelog(repo)
4014 clearchangelog(repo)
4007
4015
4008 def bench():
4016 def bench():
4009 x[0].update(targetrepo, newrevs)
4017 x[0].update(targetrepo, newrevs)
4010
4018
4011 timer(bench, setup=setup)
4019 timer(bench, setup=setup)
4012 fm.end()
4020 fm.end()
4013 finally:
4021 finally:
4014 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4022 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4015 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4023 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4016
4024
4017
4025
4018 @command(
4026 @command(
4019 b'perf::branchmapload|perfbranchmapload',
4027 b'perf::branchmapload|perfbranchmapload',
4020 [
4028 [
4021 (b'f', b'filter', b'', b'Specify repoview filter'),
4029 (b'f', b'filter', b'', b'Specify repoview filter'),
4022 (b'', b'list', False, b'List branchmap filter caches'),
4030 (b'', b'list', False, b'List branchmap filter caches'),
4023 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4031 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4024 ]
4032 ]
4025 + formatteropts,
4033 + formatteropts,
4026 )
4034 )
4027 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4035 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4028 """benchmark reading the branchmap"""
4036 """benchmark reading the branchmap"""
4029 opts = _byteskwargs(opts)
4037 opts = _byteskwargs(opts)
4030 clearrevlogs = opts[b'clear_revlogs']
4038 clearrevlogs = opts[b'clear_revlogs']
4031
4039
4032 if list:
4040 if list:
4033 for name, kind, st in repo.cachevfs.readdir(stat=True):
4041 for name, kind, st in repo.cachevfs.readdir(stat=True):
4034 if name.startswith(b'branch2'):
4042 if name.startswith(b'branch2'):
4035 filtername = name.partition(b'-')[2] or b'unfiltered'
4043 filtername = name.partition(b'-')[2] or b'unfiltered'
4036 ui.status(
4044 ui.status(
4037 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4045 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4038 )
4046 )
4039 return
4047 return
4040 if not filter:
4048 if not filter:
4041 filter = None
4049 filter = None
4042 subsettable = getbranchmapsubsettable()
4050 subsettable = getbranchmapsubsettable()
4043 if filter is None:
4051 if filter is None:
4044 repo = repo.unfiltered()
4052 repo = repo.unfiltered()
4045 else:
4053 else:
4046 repo = repoview.repoview(repo, filter)
4054 repo = repoview.repoview(repo, filter)
4047
4055
4048 repo.branchmap() # make sure we have a relevant, up to date branchmap
4056 repo.branchmap() # make sure we have a relevant, up to date branchmap
4049
4057
4050 try:
4058 try:
4051 fromfile = branchmap.branchcache.fromfile
4059 fromfile = branchmap.branchcache.fromfile
4052 except AttributeError:
4060 except AttributeError:
4053 # older versions
4061 # older versions
4054 fromfile = branchmap.read
4062 fromfile = branchmap.read
4055
4063
4056 currentfilter = filter
4064 currentfilter = filter
4057 # try once without timer, the filter may not be cached
4065 # try once without timer, the filter may not be cached
4058 while fromfile(repo) is None:
4066 while fromfile(repo) is None:
4059 currentfilter = subsettable.get(currentfilter)
4067 currentfilter = subsettable.get(currentfilter)
4060 if currentfilter is None:
4068 if currentfilter is None:
4061 raise error.Abort(
4069 raise error.Abort(
4062 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4070 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4063 )
4071 )
4064 repo = repo.filtered(currentfilter)
4072 repo = repo.filtered(currentfilter)
4065 timer, fm = gettimer(ui, opts)
4073 timer, fm = gettimer(ui, opts)
4066
4074
4067 def setup():
4075 def setup():
4068 if clearrevlogs:
4076 if clearrevlogs:
4069 clearchangelog(repo)
4077 clearchangelog(repo)
4070
4078
4071 def bench():
4079 def bench():
4072 fromfile(repo)
4080 fromfile(repo)
4073
4081
4074 timer(bench, setup=setup)
4082 timer(bench, setup=setup)
4075 fm.end()
4083 fm.end()
4076
4084
4077
4085
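# Illustrative invocations of the command above (shell lines shown as
# comments; the perf extension must be enabled, e.g. with
# --config extensions.perf=path/to/perf.py):
#
#   $ hg perf::branchmapload --list            # list on-disk branchmap caches and sizes
#   $ hg perf::branchmapload -f visible        # time loading the cache for the 'visible' filter
#   $ hg perf::branchmapload --clear-revlogs   # also refresh changelog/manifest before each run
#
# As coded above, the command aborts if no branchmap cache exists for the
# requested filter or any of its cached subsets.
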
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()

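# Illustrative invocation of the command above:
#
#   $ hg perf::loadmarkers
#
# Each timed run constructs a fresh obsstore and takes its length, so the
# measurement covers reading and parsing the on-disk obsolescence markers;
# the reported result value is the marker count, per the docstring.
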
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

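# A minimal sketch (not part of the benchmark) of the util.lrucachedict calls
# the functions above exercise: dict-style sets, cost-aware insert(), and
# lookups that raise KeyError once an entry has been evicted. Sizes and costs
# below are arbitrary example values.
def _example_lrucachedict_usage():
    d = util.lrucachedict(4, maxcost=100)
    for i in range(8):
        # inserting more than 4 items (or exceeding the cost budget) evicts
        # the least recently used entries
        d.insert(i, i * i, cost=10)
    d[b'key'] = b'value'  # plain __setitem__ also works (no cost attached)
    try:
        return d[0]  # early keys have likely been evicted by now
    except KeyError:
        return None


# The benchmark itself can be driven with, for example:
#
#   $ hg perf::lrucachedict --size 4 --gets 1000000 --costlimit 500
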
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()

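# Illustrative invocations of perf::write:
#
#   $ hg perf::write --nlines 1000 --nitems 50                # one write() call per item
#   $ hg perf::write --nlines 1000 --nitems 50 --batch-line   # one write() call per line
#   $ hg perf::write --write-method write --flush-line        # flush after every line
#
# --batch-line pre-builds each line once (item * nitems) so the timed loop
# measures a single write() call per line instead of nitems calls plus the
# trailing newline write.
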
def uisetup(ui):
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has only been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)

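# The wrapping above follows the general extensions.wrapfunction() shape: the
# wrapper receives the original callable as its first argument and decides
# whether to delegate to it. A generic sketch:
def _example_wrapper(orig, *args, **kwargs):
    # compatibility checks or instrumentation would go here
    return orig(*args, **kwargs)


# Registration would look like the following (placeholder names, not executed
# here; `some_module` and b'some_function' are hypothetical targets):
#
#     extensions.wrapfunction(some_module, b'some_function', _example_wrapper)
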
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
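# Illustrative invocation (no repository needed; the command is registered
# with norepo=True):
#
#   $ hg perf::progress --topic rebuild --total 100000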