##// END OF EJS Templates
perf-stream-consume: use the source repository config when applying...
marmoute -
r52449:f1512dbf default
parent child Browse files
Show More
@@ -1,4677 +1,4681 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
try:
    from mercurial.revlogutils import constants as revlog_constants

    # Tag revlogs created by perf so newer hg can classify them.
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Open a revlog on hg versions whose constructor takes a kind."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # Older Mercurial: no revlogutils.constants, constructor takes no kind.
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Open a revlog on hg versions predating the 'kind' argument."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
def identity(a):
    """Return *a* unchanged (used as a no-op compat shim below)."""
    return a
# for "historical portability":
# bind pycompat helpers if available, otherwise fall back to py2-era
# equivalents so this extension still loads on very old Mercurial.
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishing "missing" from any real value


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (bytes name accepted)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on py3 os.name is str, so this bytes comparison is
    # always False — py3 always has perf_counter, so the branch only
    # matters on py2. Preserved as-is for historical portability.
    util.timer = time.clock
else:
    util.timer = time.time
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table populated by the @command decorator below
cmdtable = {}
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its alias list."""
    return cmd.split(b"|")
# Pick the best available @command decorator for this hg version.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
# Declare the perf.* config options when registrar.configitem exists
# (hg >= 3.7-ish); silently skip on older versions.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2 — configitem() there rejects the 'experimental'
    # keyword, so re-register everything without it.
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
def getlen(ui):
    """Return the length function to use for benchmarks.

    Under the experimental perf.stub config every collection is reported
    as length 1 so stub runs stay trivially fast.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
class noop:
    """Dummy context manager: does nothing on enter or exit."""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


# shared do-nothing context, used where a profiler may be substituted
NOOPCTX = noop()
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*); no timing.

    Substituted for _timer when perf.stub is set; *fm* and *title* are
    accepted only for signature compatibility and are ignored.
    """
    if setup is not None:
        setup()
    func()
@contextlib.contextmanager
def timeone():
    """Time the enclosed block; yields a list that receives one
    (wall, user, sys) tuple after the block exits."""
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user CPU time, index 1 is system CPU time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
@contextlib.contextmanager
def noop_context():
    """Context manager that does nothing (default for _timer's context)."""
    yield
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func*, then report through formatter *fm*.

    Runs *func* `prerun` times unmeasured, then repeatedly with timing
    until one (elapsed, mincount) pair in *limits* is satisfied. Only the
    first measured iteration runs under *profiler* (if any); *setup* and
    *context* wrap every iteration.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. Always reports the best run; with *displayall* also reports
    max, average, and median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
            fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
618 # utilities for historical portability
618 # utilities for historical portability
619
619
620
620
def getint(ui, section, name, default):
    """Read config `section.name` as an integer, or `default` if unset.

    Raises error.ConfigError when the value exists but is not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    # resolve the attribute name once; set()/restore() close over it
    attr = _sysstr(name)
    origvalue = getattr(obj, attr)

    class attrutil:
        def set(self, newvalue):
            setattr(obj, attr, newvalue)

        def restore(self):
            # put back the value captured when this setter was created
            setattr(obj, attr, origvalue)

    return attrutil()
671
671
672
672
673 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
674
674
675
675
def getbranchmapsubsettable():
    """Locate the `subsettable` mapping wherever this Mercurial keeps it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for module in (branchmap, repoview, repoviewutil):
        table = getattr(module, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if not svfs:
        # older repositories expose the store opener as "sopener"
        return getattr(repo, 'sopener')
    return svfs
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if not vfs:
        # older repositories expose the working vfs as "opener"
        return getattr(repo, 'opener')
    return vfs
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    # probe the newest API first; the order of these checks mirrors the
    # historical evolution of the tags cache and must not be reordered
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
748 # utilities to clear cache
748 # utilities to clear cache
749
749
750
750
def clearfilecache(obj, attrname):
    """Drop `attrname` from `obj`'s @filecache bookkeeping.

    Repositories cache filecached properties on their unfiltered view,
    so we redirect there when an unfiltered() method exists; objects
    without one are used as-is.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    # remove both the materialized attribute and its cache entry
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    # Drop any cached changelog so the next access re-reads it from disk.
    if repo is not repo.unfiltered():
        # a filtered repoview keeps its own changelog cache fields; use
        # object.__setattr__ to bypass any property machinery on the view
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    # the filecached 'changelog' lives on the unfiltered repository
    clearfilecache(repo.unfiltered(), 'changelog')
765
765
766
766
767 # perf commands
767 # perf commands
768
768
769
769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a dirstate walk of the working copy (tracked + unknown)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk_once():
        entries = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        # force the walk generator to be fully consumed
        return len(list(entries))

    timer(walk_once)
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file `f` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def annotate_once():
        return len(fctx.annotate(True))

    timer(annotate_once)
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # time the low-level dirstate.status() call instead of repo.status()
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # make sure the whole status payload is consumed
            sum(map(bool, s))

        # newer dirstate implementations require status to run inside a
        # running_status context; probe for it and fall back otherwise
        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        # silence the per-file output produced during each run
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry-run so repeated runs never mutate the repository
        opts[b'dry_run'] = True
        # for "historical portability": scmutil.addremove grew a
        # `uipathfn` argument at some point; probe the signature
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        # always restore the original verbosity
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # preferred: the revlog exposes an explicit cache-clearing method
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev lookup cache to its initial state
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start each run from a cold changelog cache
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
883
883
884
884
def _default_clear_on_disk_tags_cache(repo):
    # fallback used when the tags module provides no clear_cache_on_disk
    # helper: remove the on-disk tags cache file directly
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
889
889
890
890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    # fallback used when the tags module provides no working
    # clear_cache_fnodes helper: remove the fnodes cache file directly
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
895
895
896
896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # 4-byte + 20-byte all-ones markers; NOTE(review): presumably the
    # cache format's "missing entry" sentinel — verify against
    # tags.hgtagsfnodescache
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        # overwrite each fixed-size record in place with the markers
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()
908
908
909
909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # use the in-tree helpers when this Mercurial provides them, and the
    # perf-local fallbacks otherwise
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        # simulate an update: warm a tags cache for the repo *minus* the
        # last N revisions, so each run replays the cache update for them
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # setup between runs: restore/clear whichever caches were requested
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1028
1028
1029
1029
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark iterating over every ancestor of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def walk_ancestors():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(walk_ancestors)
    fm.end()
1042
1042
1043
1043
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests against a lazy ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def probe():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test itself is the measured operation
            rev in ancestors

    timer(probe)
    fm.end()
1058
1058
1059
1059
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # positional arguments are either "REV" (with -c/-m) or "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that a real addition would hand to the
    # delta computer, from the already-stored revision
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1123
1123
1124
1124
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is (re)filled with a fresh peer in the setup step
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # for "historical portability": resolve `path` with whichever API this
    # Mercurial provides, probing from newest to oldest
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # reconnect before each run so connection setup isn't timed
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1151
1151
1152
1152
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def load():
        repo._bookmarks

    timer(load, setup=setup)
    fm.end()
1177
1177
1178
1178
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # parsebundlespec moved between modules over time; try the modern
    # location first and fall back for older Mercurial versions.
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed message: previously read "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # heads of the selected set / heads of what lies below it (the bases)
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only uncompressed bundles are supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we benchmark generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1282
1282
1283
1283
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # re-open and re-parse the bundle on every run, then hand it to `fn`
        def run():
            with open(bundlepath, b'rb') as fobj:
                unbundler = exchange.readbundle(ui, fobj, bundlepath)
                fn(unbundler)

        return run

    def makereadnbytes(chunksize):
        # drain the parsed bundle in fixed-size reads
        def run():
            with open(bundlepath, b'rb') as fobj:
                unbundler = exchange.readbundle(ui, fobj, bundlepath)
                while unbundler.read(chunksize):
                    pass

        return run

    def makestdioread(chunksize):
        # raw file reads only; no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fobj:
                while fobj.read(chunksize):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(chunksize):
        # drain every part of a bundle2 in fixed-size reads
        def run():
            with open(bundlepath, b'rb') as fobj:
                unbundler = exchange.readbundle(ui, fobj, bundlepath)
                for part in unbundler.iterparts():
                    while part.read(chunksize):
                        pass

        return run

    # raw-read baselines, always measured
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle once to pick the benchmarks matching its format
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1408
1408
1409
1409
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        # generate and fully consume the changelog chunk stream
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
1445
1445
1446
1446
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # NOTE: no docstring on purpose -- adding one would change the
    # command's help output.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate itself so only the `_dirs` rebuild is measured
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run rebuilds it
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1463
1463
1464
1464
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before selecting a benchmark mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            # invalidate so each run reloads the dirstate from disk
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1527
1527
1528
1528
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate so only the `_dirs` rebuild is timed
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the directory cache between runs; it may not exist yet
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1547
1547
1548
1548
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate so only the filefoldmap rebuild is timed
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1568
1568
1569
1569
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate so only the dirfoldmap rebuild is timed
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        # `_dirs` feeds dirfoldmap; drop it too when present
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1593
1593
1594
1594
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing
    b"a" in ds

    def setup():
        # mark dirty so write() actually writes instead of short-circuiting
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # hold the wlock for the whole measurement, as a real write would
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1612
1612
1613
1613
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1635
1635
1636
1636
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    # NOTE: no docstring on purpose -- adding one would change the
    # command's help output.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1668
1668
1669
1669
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # time the copy-tracing pass a merge would perform between the
        # two contexts relative to their ancestor
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1692
1692
1693
1693
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(ctx1, ctx2)

    timer(run)
    fm.end()
1707
1707
1708
1708
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ]
    + formatteropts,
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cached_phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def run():
        phases = cached_phases
        if full:
            # with --full, also pay the cost of re-reading the phase file
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.phase(repo, tip_rev)

    timer(run)
    fm.end()
1735
1735
1736
1736
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer `path` objects expose a push variant; fall back for older ones
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as executor:
        remotephases = executor.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    # drop the peer before the measurement itself
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # `has_node` only exists on newer index implementations
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): `iteritems()` looks like a py2-era remnant -- confirm
    # the listkeys result type provides it before modernizing.
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def run():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(run)
    fm.end()
1799
1799
1800
1800
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset; benchmark that changeset's manifest
        t = scmutil.revsingle(repo, rev, rev).manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full 40-char hex string is taken as a manifest node directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial exposes the manifest revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def run():
        # drop in-memory (and optionally on-disk) caches so every
        # iteration measures a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(run)
    fm.end()
1844
1844
1845
1845
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset's raw data from the changelog."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1858
1858
1859
1859
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset():
        # force the ignore matcher to be rebuilt from scratch each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        # attribute access triggers (re)loading of the ignore matcher
        dirstate._ignore

    timer(load, setup=reset, title=b"load")
    fm.end()
1876
1876
1877
1877
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs converts every option key to bytes, so the
        # former `opts['rev']` lookup raised KeyError here instead of
        # detecting the conflicting flags. Use the bytes key like the
        # rest of this function, and a bytes message as Mercurial expects.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # rebuild the changelog (index creation) then look the nodes up
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1940
1940
1941
1941
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def run():
        get = nodeget[0]
        for n in nodes:
            get(n)

    if clearcaches:
        # rebuild a cold nodemap before every timed run
        setup = setnodeget
    else:
        setup = None
        setnodeget()
        run()  # prewarm the data structure
    timer(run, setup=setup)
    fm.end()
2012
2012
2013
2013
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the cost of spawning `hg version -q` as a subprocess."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name != 'nt':
            # neutralize user/system configuration with an empty HGRCPATH
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
2030
2030
2031
2031
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    Returns a callable taking the repository and producing the stream data,
    or aborts if the requested version is unknown or unavailable in this
    Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            entries, bytes, data = generatev1(repo, None, None, True)
            return data

        # fix: register the `generate(repo)` wrapper, not the raw
        # `generatev1` whose 4-argument signature does not match the
        # contract used by callers (v2/v3 below already do this).
        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            return generatev3(repo, None, None, True)

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2078
2078
2079
2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # Keep the produced generator in a holder: deleting it may trigger
    # cleanup work we do not want to include in the measurement.
    result_holder = [None]

    def reset():
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def scan():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(scan, setup=reset, title=b"load")
    fm.end()
2114
2114
2115
2115
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fix: help-string typo "to us" -> "to use"
            b'stream version to use ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation; exhaust the
        # stream so the full generation cost is measured
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2147
2147
2148
2148
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # [0] = open bundle file handle, [1] = temporary target directory
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle, tmp_dir = run_variables

        # copy the full configuration so the source repository's config is
        # taken into account during the benchmark
        new_ui = repo.ui.__class__(repo.ui)
        # only pass ui when no srcrepo
        localrepo.createrepository(
            new_ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(new_ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        if util.safehasattr(gen, 'apply'):
            # stream v1 bundles know how to apply themselves
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2226
2230
2227
2231
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for n in nodes:
            repo.changelog.parents(n)

    timer(run)
    fm.end()
2253
2257
2254
2258
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx."""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[x].files())

    timer(run)
    fm.end()
2266
2270
2267
2271
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files list straight from the changelog."""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # field [3] of a changelog entry is the list of touched files
        len(cl.read(x)[3])

    timer(run)
    fm.end()
2280
2284
2281
2285
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
2288
2292
2289
2293
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a long random sequence of linelog edits."""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the edit sequence identical across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
2327
2331
2328
2332
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
2336
2340
2337
2341
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly instantiated changelog."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        # older Mercurial takes `indexfile` instead of `radix`
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def run():
        cl.rev(n)
        # clear caches so the next iteration looks the node up cold
        clearcaches(cl)

    timer(run)
    fm.end()
2358
2362
2359
2363
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the output so terminal I/O does not pollute the measurement
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
2377
2381
2378
2382
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        last = len(repo) - 1
        for walked in repo.changelog.revs(start=last, stop=-1):
            ctx = repo[walked]
            # reading the branch forces the changelog entry to be parsed,
            # in addition to the index lookup
            ctx.branch()

    timer(moonwalk)
    fm.end()
2395
2399
2396
2400
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template

    Renders each requested revision with the template through a ui whose
    output is discarded, so only templating cost is measured.
    """
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # discard rendered output so we measure templating, not terminal I/O.
    # The handle was previously leaked; close it once timing is done.
    devnull = open(os.devnull, 'wb')
    try:
        nullui.fout = devnull
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        devnull.close()
2439
2443
2440
2444
def _displaystats(ui, opts, entries, data):
    """render percentile statistics collected by the perf helper commands

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    key to a list of ``(value, ...)`` tuples gathered during the run.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be derived from the number of collected
        # values for this key, not from the number of keys in `data`
        # (the previous `len(data)` collapsed every percentile onto the
        # first element).
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2485
2489
2486
2490
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style format keyed into the per-triplet `data`)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns are never filled in
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw observations, later summarized by _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are interesting for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2668
2672
2669
2673
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        # with --timing two extra columns (renames, time) are printed
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw observations, later summarized by _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits produce interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file change between base and parent: nothing to trace
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2808
2812
2809
2813
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2816
2820
2817
2821
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    timer(lambda: store.fncache._load())
    fm.end()
2829
2833
2830
2834
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction

    The fncache is marked dirty before each write so the file is actually
    rewritten on every run.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # ensure the lock is released even if loading/timing raises
    # (previously a failure here would leak the repository lock)
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2849
2853
2850
2854
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark path-encoding every entry listed in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        encode = store.encode
        for path in store.fncache.entries:
            encode(path)

    timer(encodeall)
    fm.end()
2864
2868
2865
2869
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker-thread body for the threaded variant of perfbdiff.
    #
    # Protocol (inferred from this function; confirm against the caller):
    # the producer pushes (text1, text2) pairs onto `q` and a None sentinel
    # to mark the end of a timing round.  After draining a round the worker
    # parks on the `ready` condition until woken for the next round, and
    # exits once the `done` event is set.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # pick the diff flavour requested on the command line
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2882
2886
def _manifestrevision(repo, mnode):
    """return the raw revision text for manifest node ``mnode``

    Supports both the modern manifestlog API (``getstorage``) and the
    legacy ``_revlog`` attribute of older Mercurial versions.
    """
    manifestlog = repo.manifestlog

    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
2892
2896
2893
2897
2894 @command(
2898 @command(
2895 b'perf::bdiff|perfbdiff',
2899 b'perf::bdiff|perfbdiff',
2896 revlogopts
2900 revlogopts
2897 + formatteropts
2901 + formatteropts
2898 + [
2902 + [
2899 (
2903 (
2900 b'',
2904 b'',
2901 b'count',
2905 b'count',
2902 1,
2906 1,
2903 b'number of revisions to test (when using --startrev)',
2907 b'number of revisions to test (when using --startrev)',
2904 ),
2908 ),
2905 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2909 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2906 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2910 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2907 (b'', b'blocks', False, b'test computing diffs into blocks'),
2911 (b'', b'blocks', False, b'test computing diffs into blocks'),
2908 (b'', b'xdiff', False, b'use xdiff algorithm'),
2912 (b'', b'xdiff', False, b'use xdiff algorithm'),
2909 ],
2913 ],
2910 b'-c|-m|FILE REV',
2914 b'-c|-m|FILE REV',
2911 )
2915 )
2912 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2916 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2913 """benchmark a bdiff between revisions
2917 """benchmark a bdiff between revisions
2914
2918
2915 By default, benchmark a bdiff between its delta parent and itself.
2919 By default, benchmark a bdiff between its delta parent and itself.
2916
2920
2917 With ``--count``, benchmark bdiffs between delta parents and self for N
2921 With ``--count``, benchmark bdiffs between delta parents and self for N
2918 revisions starting at the specified revision.
2922 revisions starting at the specified revision.
2919
2923
2920 With ``--alldata``, assume the requested revision is a changeset and
2924 With ``--alldata``, assume the requested revision is a changeset and
2921 measure bdiffs for all changes related to that changeset (manifest
2925 measure bdiffs for all changes related to that changeset (manifest
2922 and filelogs).
2926 and filelogs).
2923 """
2927 """
2924 opts = _byteskwargs(opts)
2928 opts = _byteskwargs(opts)
2925
2929
2926 if opts[b'xdiff'] and not opts[b'blocks']:
2930 if opts[b'xdiff'] and not opts[b'blocks']:
2927 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2931 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2928
2932
2929 if opts[b'alldata']:
2933 if opts[b'alldata']:
2930 opts[b'changelog'] = True
2934 opts[b'changelog'] = True
2931
2935
2932 if opts.get(b'changelog') or opts.get(b'manifest'):
2936 if opts.get(b'changelog') or opts.get(b'manifest'):
2933 file_, rev = None, file_
2937 file_, rev = None, file_
2934 elif rev is None:
2938 elif rev is None:
2935 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2939 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2936
2940
2937 blocks = opts[b'blocks']
2941 blocks = opts[b'blocks']
2938 xdiff = opts[b'xdiff']
2942 xdiff = opts[b'xdiff']
2939 textpairs = []
2943 textpairs = []
2940
2944
2941 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2945 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2942
2946
2943 startrev = r.rev(r.lookup(rev))
2947 startrev = r.rev(r.lookup(rev))
2944 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2948 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2945 if opts[b'alldata']:
2949 if opts[b'alldata']:
2946 # Load revisions associated with changeset.
2950 # Load revisions associated with changeset.
2947 ctx = repo[rev]
2951 ctx = repo[rev]
2948 mtext = _manifestrevision(repo, ctx.manifestnode())
2952 mtext = _manifestrevision(repo, ctx.manifestnode())
2949 for pctx in ctx.parents():
2953 for pctx in ctx.parents():
2950 pman = _manifestrevision(repo, pctx.manifestnode())
2954 pman = _manifestrevision(repo, pctx.manifestnode())
2951 textpairs.append((pman, mtext))
2955 textpairs.append((pman, mtext))
2952
2956
2953 # Load filelog revisions by iterating manifest delta.
2957 # Load filelog revisions by iterating manifest delta.
2954 man = ctx.manifest()
2958 man = ctx.manifest()
2955 pman = ctx.p1().manifest()
2959 pman = ctx.p1().manifest()
2956 for filename, change in pman.diff(man).items():
2960 for filename, change in pman.diff(man).items():
2957 fctx = repo.file(filename)
2961 fctx = repo.file(filename)
2958 f1 = fctx.revision(change[0][0] or -1)
2962 f1 = fctx.revision(change[0][0] or -1)
2959 f2 = fctx.revision(change[1][0] or -1)
2963 f2 = fctx.revision(change[1][0] or -1)
2960 textpairs.append((f1, f2))
2964 textpairs.append((f1, f2))
2961 else:
2965 else:
2962 dp = r.deltaparent(rev)
2966 dp = r.deltaparent(rev)
2963 textpairs.append((r.revision(dp), r.revision(rev)))
2967 textpairs.append((r.revision(dp), r.revision(rev)))
2964
2968
2965 withthreads = threads > 0
2969 withthreads = threads > 0
2966 if not withthreads:
2970 if not withthreads:
2967
2971
2968 def d():
2972 def d():
2969 for pair in textpairs:
2973 for pair in textpairs:
2970 if xdiff:
2974 if xdiff:
2971 mdiff.bdiff.xdiffblocks(*pair)
2975 mdiff.bdiff.xdiffblocks(*pair)
2972 elif blocks:
2976 elif blocks:
2973 mdiff.bdiff.blocks(*pair)
2977 mdiff.bdiff.blocks(*pair)
2974 else:
2978 else:
2975 mdiff.textdiff(*pair)
2979 mdiff.textdiff(*pair)
2976
2980
2977 else:
2981 else:
2978 q = queue()
2982 q = queue()
2979 for i in _xrange(threads):
2983 for i in _xrange(threads):
2980 q.put(None)
2984 q.put(None)
2981 ready = threading.Condition()
2985 ready = threading.Condition()
2982 done = threading.Event()
2986 done = threading.Event()
2983 for i in _xrange(threads):
2987 for i in _xrange(threads):
2984 threading.Thread(
2988 threading.Thread(
2985 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2989 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2986 ).start()
2990 ).start()
2987 q.join()
2991 q.join()
2988
2992
2989 def d():
2993 def d():
2990 for pair in textpairs:
2994 for pair in textpairs:
2991 q.put(pair)
2995 q.put(pair)
2992 for i in _xrange(threads):
2996 for i in _xrange(threads):
2993 q.put(None)
2997 q.put(None)
2994 with ready:
2998 with ready:
2995 ready.notify_all()
2999 ready.notify_all()
2996 q.join()
3000 q.join()
2997
3001
2998 timer, fm = gettimer(ui, opts)
3002 timer, fm = gettimer(ui, opts)
2999 timer(d)
3003 timer(d)
3000 fm.end()
3004 fm.end()
3001
3005
3002 if withthreads:
3006 if withthreads:
3003 done.set()
3007 done.set()
3004 for i in _xrange(threads):
3008 for i in _xrange(threads):
3005 q.put(None)
3009 q.put(None)
3006 with ready:
3010 with ready:
3007 ready.notify_all()
3011 ready.notify_all()
3008
3012
3009
3013
3010 @command(
3014 @command(
3011 b'perf::unbundle',
3015 b'perf::unbundle',
3012 [
3016 [
3013 (b'', b'as-push', None, b'pretend the bundle comes from a push'),
3017 (b'', b'as-push', None, b'pretend the bundle comes from a push'),
3014 ]
3018 ]
3015 + formatteropts,
3019 + formatteropts,
3016 b'BUNDLE_FILE',
3020 b'BUNDLE_FILE',
3017 )
3021 )
3018 def perf_unbundle(ui, repo, fname, **opts):
3022 def perf_unbundle(ui, repo, fname, **opts):
3019 """benchmark application of a bundle in a repository.
3023 """benchmark application of a bundle in a repository.
3020
3024
3021 This does not include the final transaction processing
3025 This does not include the final transaction processing
3022
3026
3023 The --as-push option make the unbundle operation appears like it comes from
3027 The --as-push option make the unbundle operation appears like it comes from
3024 a client push. It change some aspect of the processing and associated
3028 a client push. It change some aspect of the processing and associated
3025 performance profile.
3029 performance profile.
3026 """
3030 """
3027
3031
3028 from mercurial import exchange
3032 from mercurial import exchange
3029 from mercurial import bundle2
3033 from mercurial import bundle2
3030 from mercurial import transaction
3034 from mercurial import transaction
3031
3035
3032 opts = _byteskwargs(opts)
3036 opts = _byteskwargs(opts)
3033
3037
3034 ### some compatibility hotfix
3038 ### some compatibility hotfix
3035 #
3039 #
3036 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
3040 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
3037 # critical regression that break transaction rollback for files that are
3041 # critical regression that break transaction rollback for files that are
3038 # de-inlined.
3042 # de-inlined.
3039 method = transaction.transaction._addentry
3043 method = transaction.transaction._addentry
3040 pre_63edc384d3b7 = "data" in getargspec(method).args
3044 pre_63edc384d3b7 = "data" in getargspec(method).args
3041 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
3045 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
3042 # a changeset that is a close descendant of 18415fc918a1, the changeset
3046 # a changeset that is a close descendant of 18415fc918a1, the changeset
3043 # that conclude the fix run for the bug introduced in 63edc384d3b7.
3047 # that conclude the fix run for the bug introduced in 63edc384d3b7.
3044 args = getargspec(error.Abort.__init__).args
3048 args = getargspec(error.Abort.__init__).args
3045 post_18415fc918a1 = "detailed_exit_code" in args
3049 post_18415fc918a1 = "detailed_exit_code" in args
3046
3050
3047 unbundle_source = b'perf::unbundle'
3051 unbundle_source = b'perf::unbundle'
3048 if opts[b'as_push']:
3052 if opts[b'as_push']:
3049 unbundle_source = b'push'
3053 unbundle_source = b'push'
3050
3054
3051 old_max_inline = None
3055 old_max_inline = None
3052 try:
3056 try:
3053 if not (pre_63edc384d3b7 or post_18415fc918a1):
3057 if not (pre_63edc384d3b7 or post_18415fc918a1):
3054 # disable inlining
3058 # disable inlining
3055 old_max_inline = mercurial.revlog._maxinline
3059 old_max_inline = mercurial.revlog._maxinline
3056 # large enough to never happen
3060 # large enough to never happen
3057 mercurial.revlog._maxinline = 2 ** 50
3061 mercurial.revlog._maxinline = 2 ** 50
3058
3062
3059 with repo.lock():
3063 with repo.lock():
3060 bundle = [None, None]
3064 bundle = [None, None]
3061 orig_quiet = repo.ui.quiet
3065 orig_quiet = repo.ui.quiet
3062 try:
3066 try:
3063 repo.ui.quiet = True
3067 repo.ui.quiet = True
3064 with open(fname, mode="rb") as f:
3068 with open(fname, mode="rb") as f:
3065
3069
3066 def noop_report(*args, **kwargs):
3070 def noop_report(*args, **kwargs):
3067 pass
3071 pass
3068
3072
3069 def setup():
3073 def setup():
3070 gen, tr = bundle
3074 gen, tr = bundle
3071 if tr is not None:
3075 if tr is not None:
3072 tr.abort()
3076 tr.abort()
3073 bundle[:] = [None, None]
3077 bundle[:] = [None, None]
3074 f.seek(0)
3078 f.seek(0)
3075 bundle[0] = exchange.readbundle(ui, f, fname)
3079 bundle[0] = exchange.readbundle(ui, f, fname)
3076 bundle[1] = repo.transaction(b'perf::unbundle')
3080 bundle[1] = repo.transaction(b'perf::unbundle')
3077 # silence the transaction
3081 # silence the transaction
3078 bundle[1]._report = noop_report
3082 bundle[1]._report = noop_report
3079
3083
3080 def apply():
3084 def apply():
3081 gen, tr = bundle
3085 gen, tr = bundle
3082 bundle2.applybundle(
3086 bundle2.applybundle(
3083 repo,
3087 repo,
3084 gen,
3088 gen,
3085 tr,
3089 tr,
3086 source=unbundle_source,
3090 source=unbundle_source,
3087 url=fname,
3091 url=fname,
3088 )
3092 )
3089
3093
3090 timer, fm = gettimer(ui, opts)
3094 timer, fm = gettimer(ui, opts)
3091 timer(apply, setup=setup)
3095 timer(apply, setup=setup)
3092 fm.end()
3096 fm.end()
3093 finally:
3097 finally:
3094 repo.ui.quiet == orig_quiet
3098 repo.ui.quiet == orig_quiet
3095 gen, tr = bundle
3099 gen, tr = bundle
3096 if tr is not None:
3100 if tr is not None:
3097 tr.abort()
3101 tr.abort()
3098 finally:
3102 finally:
3099 if old_max_inline is not None:
3103 if old_max_inline is not None:
3100 mercurial.revlog._maxinline = old_max_inline
3104 mercurial.revlog._maxinline = old_max_inline
3101
3105
3102
3106
3103 @command(
3107 @command(
3104 b'perf::unidiff|perfunidiff',
3108 b'perf::unidiff|perfunidiff',
3105 revlogopts
3109 revlogopts
3106 + formatteropts
3110 + formatteropts
3107 + [
3111 + [
3108 (
3112 (
3109 b'',
3113 b'',
3110 b'count',
3114 b'count',
3111 1,
3115 1,
3112 b'number of revisions to test (when using --startrev)',
3116 b'number of revisions to test (when using --startrev)',
3113 ),
3117 ),
3114 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
3118 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
3115 ],
3119 ],
3116 b'-c|-m|FILE REV',
3120 b'-c|-m|FILE REV',
3117 )
3121 )
3118 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
3122 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
3119 """benchmark a unified diff between revisions
3123 """benchmark a unified diff between revisions
3120
3124
3121 This doesn't include any copy tracing - it's just a unified diff
3125 This doesn't include any copy tracing - it's just a unified diff
3122 of the texts.
3126 of the texts.
3123
3127
3124 By default, benchmark a diff between its delta parent and itself.
3128 By default, benchmark a diff between its delta parent and itself.
3125
3129
3126 With ``--count``, benchmark diffs between delta parents and self for N
3130 With ``--count``, benchmark diffs between delta parents and self for N
3127 revisions starting at the specified revision.
3131 revisions starting at the specified revision.
3128
3132
3129 With ``--alldata``, assume the requested revision is a changeset and
3133 With ``--alldata``, assume the requested revision is a changeset and
3130 measure diffs for all changes related to that changeset (manifest
3134 measure diffs for all changes related to that changeset (manifest
3131 and filelogs).
3135 and filelogs).
3132 """
3136 """
3133 opts = _byteskwargs(opts)
3137 opts = _byteskwargs(opts)
3134 if opts[b'alldata']:
3138 if opts[b'alldata']:
3135 opts[b'changelog'] = True
3139 opts[b'changelog'] = True
3136
3140
3137 if opts.get(b'changelog') or opts.get(b'manifest'):
3141 if opts.get(b'changelog') or opts.get(b'manifest'):
3138 file_, rev = None, file_
3142 file_, rev = None, file_
3139 elif rev is None:
3143 elif rev is None:
3140 raise error.CommandError(b'perfunidiff', b'invalid arguments')
3144 raise error.CommandError(b'perfunidiff', b'invalid arguments')
3141
3145
3142 textpairs = []
3146 textpairs = []
3143
3147
3144 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
3148 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
3145
3149
3146 startrev = r.rev(r.lookup(rev))
3150 startrev = r.rev(r.lookup(rev))
3147 for rev in range(startrev, min(startrev + count, len(r) - 1)):
3151 for rev in range(startrev, min(startrev + count, len(r) - 1)):
3148 if opts[b'alldata']:
3152 if opts[b'alldata']:
3149 # Load revisions associated with changeset.
3153 # Load revisions associated with changeset.
3150 ctx = repo[rev]
3154 ctx = repo[rev]
3151 mtext = _manifestrevision(repo, ctx.manifestnode())
3155 mtext = _manifestrevision(repo, ctx.manifestnode())
3152 for pctx in ctx.parents():
3156 for pctx in ctx.parents():
3153 pman = _manifestrevision(repo, pctx.manifestnode())
3157 pman = _manifestrevision(repo, pctx.manifestnode())
3154 textpairs.append((pman, mtext))
3158 textpairs.append((pman, mtext))
3155
3159
3156 # Load filelog revisions by iterating manifest delta.
3160 # Load filelog revisions by iterating manifest delta.
3157 man = ctx.manifest()
3161 man = ctx.manifest()
3158 pman = ctx.p1().manifest()
3162 pman = ctx.p1().manifest()
3159 for filename, change in pman.diff(man).items():
3163 for filename, change in pman.diff(man).items():
3160 fctx = repo.file(filename)
3164 fctx = repo.file(filename)
3161 f1 = fctx.revision(change[0][0] or -1)
3165 f1 = fctx.revision(change[0][0] or -1)
3162 f2 = fctx.revision(change[1][0] or -1)
3166 f2 = fctx.revision(change[1][0] or -1)
3163 textpairs.append((f1, f2))
3167 textpairs.append((f1, f2))
3164 else:
3168 else:
3165 dp = r.deltaparent(rev)
3169 dp = r.deltaparent(rev)
3166 textpairs.append((r.revision(dp), r.revision(rev)))
3170 textpairs.append((r.revision(dp), r.revision(rev)))
3167
3171
3168 def d():
3172 def d():
3169 for left, right in textpairs:
3173 for left, right in textpairs:
3170 # The date strings don't matter, so we pass empty strings.
3174 # The date strings don't matter, so we pass empty strings.
3171 headerlines, hunks = mdiff.unidiff(
3175 headerlines, hunks = mdiff.unidiff(
3172 left, b'', right, b'', b'left', b'right', binary=False
3176 left, b'', right, b'', b'left', b'right', binary=False
3173 )
3177 )
3174 # consume iterators in roughly the way patch.py does
3178 # consume iterators in roughly the way patch.py does
3175 b'\n'.join(headerlines)
3179 b'\n'.join(headerlines)
3176 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
3180 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
3177
3181
3178 timer, fm = gettimer(ui, opts)
3182 timer, fm = gettimer(ui, opts)
3179 timer(d)
3183 timer(d)
3180 fm.end()
3184 fm.end()
3181
3185
3182
3186
3183 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3187 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3184 def perfdiffwd(ui, repo, **opts):
3188 def perfdiffwd(ui, repo, **opts):
3185 """Profile diff of working directory changes"""
3189 """Profile diff of working directory changes"""
3186 opts = _byteskwargs(opts)
3190 opts = _byteskwargs(opts)
3187 timer, fm = gettimer(ui, opts)
3191 timer, fm = gettimer(ui, opts)
3188 options = {
3192 options = {
3189 'w': 'ignore_all_space',
3193 'w': 'ignore_all_space',
3190 'b': 'ignore_space_change',
3194 'b': 'ignore_space_change',
3191 'B': 'ignore_blank_lines',
3195 'B': 'ignore_blank_lines',
3192 }
3196 }
3193
3197
3194 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3198 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3195 opts = {options[c]: b'1' for c in diffopt}
3199 opts = {options[c]: b'1' for c in diffopt}
3196
3200
3197 def d():
3201 def d():
3198 ui.pushbuffer()
3202 ui.pushbuffer()
3199 commands.diff(ui, repo, **opts)
3203 commands.diff(ui, repo, **opts)
3200 ui.popbuffer()
3204 ui.popbuffer()
3201
3205
3202 diffopt = diffopt.encode('ascii')
3206 diffopt = diffopt.encode('ascii')
3203 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3207 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3204 timer(d, title=title)
3208 timer(d, title=title)
3205 fm.end()
3209 fm.end()
3206
3210
3207
3211
3208 @command(
3212 @command(
3209 b'perf::revlogindex|perfrevlogindex',
3213 b'perf::revlogindex|perfrevlogindex',
3210 revlogopts + formatteropts,
3214 revlogopts + formatteropts,
3211 b'-c|-m|FILE',
3215 b'-c|-m|FILE',
3212 )
3216 )
3213 def perfrevlogindex(ui, repo, file_=None, **opts):
3217 def perfrevlogindex(ui, repo, file_=None, **opts):
3214 """Benchmark operations against a revlog index.
3218 """Benchmark operations against a revlog index.
3215
3219
3216 This tests constructing a revlog instance, reading index data,
3220 This tests constructing a revlog instance, reading index data,
3217 parsing index data, and performing various operations related to
3221 parsing index data, and performing various operations related to
3218 index data.
3222 index data.
3219 """
3223 """
3220
3224
3221 opts = _byteskwargs(opts)
3225 opts = _byteskwargs(opts)
3222
3226
3223 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3227 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3224
3228
3225 opener = getattr(rl, 'opener') # trick linter
3229 opener = getattr(rl, 'opener') # trick linter
3226 # compat with hg <= 5.8
3230 # compat with hg <= 5.8
3227 radix = getattr(rl, 'radix', None)
3231 radix = getattr(rl, 'radix', None)
3228 indexfile = getattr(rl, '_indexfile', None)
3232 indexfile = getattr(rl, '_indexfile', None)
3229 if indexfile is None:
3233 if indexfile is None:
3230 # compatibility with <= hg-5.8
3234 # compatibility with <= hg-5.8
3231 indexfile = getattr(rl, 'indexfile')
3235 indexfile = getattr(rl, 'indexfile')
3232 data = opener.read(indexfile)
3236 data = opener.read(indexfile)
3233
3237
3234 header = struct.unpack(b'>I', data[0:4])[0]
3238 header = struct.unpack(b'>I', data[0:4])[0]
3235 version = header & 0xFFFF
3239 version = header & 0xFFFF
3236 if version == 1:
3240 if version == 1:
3237 inline = header & (1 << 16)
3241 inline = header & (1 << 16)
3238 else:
3242 else:
3239 raise error.Abort(b'unsupported revlog version: %d' % version)
3243 raise error.Abort(b'unsupported revlog version: %d' % version)
3240
3244
3241 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
3245 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
3242 if parse_index_v1 is None:
3246 if parse_index_v1 is None:
3243 parse_index_v1 = mercurial.revlog.revlogio().parseindex
3247 parse_index_v1 = mercurial.revlog.revlogio().parseindex
3244
3248
3245 rllen = len(rl)
3249 rllen = len(rl)
3246
3250
3247 node0 = rl.node(0)
3251 node0 = rl.node(0)
3248 node25 = rl.node(rllen // 4)
3252 node25 = rl.node(rllen // 4)
3249 node50 = rl.node(rllen // 2)
3253 node50 = rl.node(rllen // 2)
3250 node75 = rl.node(rllen // 4 * 3)
3254 node75 = rl.node(rllen // 4 * 3)
3251 node100 = rl.node(rllen - 1)
3255 node100 = rl.node(rllen - 1)
3252
3256
3253 allrevs = range(rllen)
3257 allrevs = range(rllen)
3254 allrevsrev = list(reversed(allrevs))
3258 allrevsrev = list(reversed(allrevs))
3255 allnodes = [rl.node(rev) for rev in range(rllen)]
3259 allnodes = [rl.node(rev) for rev in range(rllen)]
3256 allnodesrev = list(reversed(allnodes))
3260 allnodesrev = list(reversed(allnodes))
3257
3261
3258 def constructor():
3262 def constructor():
3259 if radix is not None:
3263 if radix is not None:
3260 revlog(opener, radix=radix)
3264 revlog(opener, radix=radix)
3261 else:
3265 else:
3262 # hg <= 5.8
3266 # hg <= 5.8
3263 revlog(opener, indexfile=indexfile)
3267 revlog(opener, indexfile=indexfile)
3264
3268
3265 def read():
3269 def read():
3266 with opener(indexfile) as fh:
3270 with opener(indexfile) as fh:
3267 fh.read()
3271 fh.read()
3268
3272
3269 def parseindex():
3273 def parseindex():
3270 parse_index_v1(data, inline)
3274 parse_index_v1(data, inline)
3271
3275
3272 def getentry(revornode):
3276 def getentry(revornode):
3273 index = parse_index_v1(data, inline)[0]
3277 index = parse_index_v1(data, inline)[0]
3274 index[revornode]
3278 index[revornode]
3275
3279
3276 def getentries(revs, count=1):
3280 def getentries(revs, count=1):
3277 index = parse_index_v1(data, inline)[0]
3281 index = parse_index_v1(data, inline)[0]
3278
3282
3279 for i in range(count):
3283 for i in range(count):
3280 for rev in revs:
3284 for rev in revs:
3281 index[rev]
3285 index[rev]
3282
3286
3283 def resolvenode(node):
3287 def resolvenode(node):
3284 index = parse_index_v1(data, inline)[0]
3288 index = parse_index_v1(data, inline)[0]
3285 rev = getattr(index, 'rev', None)
3289 rev = getattr(index, 'rev', None)
3286 if rev is None:
3290 if rev is None:
3287 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3291 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3288 # This only works for the C code.
3292 # This only works for the C code.
3289 if nodemap is None:
3293 if nodemap is None:
3290 return
3294 return
3291 rev = nodemap.__getitem__
3295 rev = nodemap.__getitem__
3292
3296
3293 try:
3297 try:
3294 rev(node)
3298 rev(node)
3295 except error.RevlogError:
3299 except error.RevlogError:
3296 pass
3300 pass
3297
3301
3298 def resolvenodes(nodes, count=1):
3302 def resolvenodes(nodes, count=1):
3299 index = parse_index_v1(data, inline)[0]
3303 index = parse_index_v1(data, inline)[0]
3300 rev = getattr(index, 'rev', None)
3304 rev = getattr(index, 'rev', None)
3301 if rev is None:
3305 if rev is None:
3302 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3306 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3303 # This only works for the C code.
3307 # This only works for the C code.
3304 if nodemap is None:
3308 if nodemap is None:
3305 return
3309 return
3306 rev = nodemap.__getitem__
3310 rev = nodemap.__getitem__
3307
3311
3308 for i in range(count):
3312 for i in range(count):
3309 for node in nodes:
3313 for node in nodes:
3310 try:
3314 try:
3311 rev(node)
3315 rev(node)
3312 except error.RevlogError:
3316 except error.RevlogError:
3313 pass
3317 pass
3314
3318
3315 benches = [
3319 benches = [
3316 (constructor, b'revlog constructor'),
3320 (constructor, b'revlog constructor'),
3317 (read, b'read'),
3321 (read, b'read'),
3318 (parseindex, b'create index object'),
3322 (parseindex, b'create index object'),
3319 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3323 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3320 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3324 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3321 (lambda: resolvenode(node0), b'look up node at rev 0'),
3325 (lambda: resolvenode(node0), b'look up node at rev 0'),
3322 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3326 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3323 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3327 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3324 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3328 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3325 (lambda: resolvenode(node100), b'look up node at tip'),
3329 (lambda: resolvenode(node100), b'look up node at tip'),
3326 # 2x variation is to measure caching impact.
3330 # 2x variation is to measure caching impact.
3327 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3331 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3328 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3332 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3329 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3333 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3330 (
3334 (
3331 lambda: resolvenodes(allnodesrev, 2),
3335 lambda: resolvenodes(allnodesrev, 2),
3332 b'look up all nodes 2x (reverse)',
3336 b'look up all nodes 2x (reverse)',
3333 ),
3337 ),
3334 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3338 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3335 (
3339 (
3336 lambda: getentries(allrevs, 2),
3340 lambda: getentries(allrevs, 2),
3337 b'retrieve all index entries 2x (forward)',
3341 b'retrieve all index entries 2x (forward)',
3338 ),
3342 ),
3339 (
3343 (
3340 lambda: getentries(allrevsrev),
3344 lambda: getentries(allrevsrev),
3341 b'retrieve all index entries (reverse)',
3345 b'retrieve all index entries (reverse)',
3342 ),
3346 ),
3343 (
3347 (
3344 lambda: getentries(allrevsrev, 2),
3348 lambda: getentries(allrevsrev, 2),
3345 b'retrieve all index entries 2x (reverse)',
3349 b'retrieve all index entries 2x (reverse)',
3346 ),
3350 ),
3347 ]
3351 ]
3348
3352
3349 for fn, title in benches:
3353 for fn, title in benches:
3350 timer, fm = gettimer(ui, opts)
3354 timer, fm = gettimer(ui, opts)
3351 timer(fn, title=title)
3355 timer(fn, title=title)
3352 fm.end()
3356 fm.end()
3353
3357
3354
3358
3355 @command(
3359 @command(
3356 b'perf::revlogrevisions|perfrevlogrevisions',
3360 b'perf::revlogrevisions|perfrevlogrevisions',
3357 revlogopts
3361 revlogopts
3358 + formatteropts
3362 + formatteropts
3359 + [
3363 + [
3360 (b'd', b'dist', 100, b'distance between the revisions'),
3364 (b'd', b'dist', 100, b'distance between the revisions'),
3361 (b's', b'startrev', 0, b'revision to start reading at'),
3365 (b's', b'startrev', 0, b'revision to start reading at'),
3362 (b'', b'reverse', False, b'read in reverse'),
3366 (b'', b'reverse', False, b'read in reverse'),
3363 ],
3367 ],
3364 b'-c|-m|FILE',
3368 b'-c|-m|FILE',
3365 )
3369 )
3366 def perfrevlogrevisions(
3370 def perfrevlogrevisions(
3367 ui, repo, file_=None, startrev=0, reverse=False, **opts
3371 ui, repo, file_=None, startrev=0, reverse=False, **opts
3368 ):
3372 ):
3369 """Benchmark reading a series of revisions from a revlog.
3373 """Benchmark reading a series of revisions from a revlog.
3370
3374
3371 By default, we read every ``-d/--dist`` revision from 0 to tip of
3375 By default, we read every ``-d/--dist`` revision from 0 to tip of
3372 the specified revlog.
3376 the specified revlog.
3373
3377
3374 The start revision can be defined via ``-s/--startrev``.
3378 The start revision can be defined via ``-s/--startrev``.
3375 """
3379 """
3376 opts = _byteskwargs(opts)
3380 opts = _byteskwargs(opts)
3377
3381
3378 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3382 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3379 rllen = getlen(ui)(rl)
3383 rllen = getlen(ui)(rl)
3380
3384
3381 if startrev < 0:
3385 if startrev < 0:
3382 startrev = rllen + startrev
3386 startrev = rllen + startrev
3383
3387
3384 def d():
3388 def d():
3385 rl.clearcaches()
3389 rl.clearcaches()
3386
3390
3387 beginrev = startrev
3391 beginrev = startrev
3388 endrev = rllen
3392 endrev = rllen
3389 dist = opts[b'dist']
3393 dist = opts[b'dist']
3390
3394
3391 if reverse:
3395 if reverse:
3392 beginrev, endrev = endrev - 1, beginrev - 1
3396 beginrev, endrev = endrev - 1, beginrev - 1
3393 dist = -1 * dist
3397 dist = -1 * dist
3394
3398
3395 for x in _xrange(beginrev, endrev, dist):
3399 for x in _xrange(beginrev, endrev, dist):
3396 # Old revisions don't support passing int.
3400 # Old revisions don't support passing int.
3397 n = rl.node(x)
3401 n = rl.node(x)
3398 rl.revision(n)
3402 rl.revision(n)
3399
3403
3400 timer, fm = gettimer(ui, opts)
3404 timer, fm = gettimer(ui, opts)
3401 timer(d)
3405 timer(d)
3402 fm.end()
3406 fm.end()
3403
3407
3404
3408
3405 @command(
3409 @command(
3406 b'perf::revlogwrite|perfrevlogwrite',
3410 b'perf::revlogwrite|perfrevlogwrite',
3407 revlogopts
3411 revlogopts
3408 + formatteropts
3412 + formatteropts
3409 + [
3413 + [
3410 (b's', b'startrev', 1000, b'revision to start writing at'),
3414 (b's', b'startrev', 1000, b'revision to start writing at'),
3411 (b'', b'stoprev', -1, b'last revision to write'),
3415 (b'', b'stoprev', -1, b'last revision to write'),
3412 (b'', b'count', 3, b'number of passes to perform'),
3416 (b'', b'count', 3, b'number of passes to perform'),
3413 (b'', b'details', False, b'print timing for every revisions tested'),
3417 (b'', b'details', False, b'print timing for every revisions tested'),
3414 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3418 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3415 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3419 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3416 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3420 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3417 ],
3421 ],
3418 b'-c|-m|FILE',
3422 b'-c|-m|FILE',
3419 )
3423 )
3420 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3424 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3421 """Benchmark writing a series of revisions to a revlog.
3425 """Benchmark writing a series of revisions to a revlog.
3422
3426
3423 Possible source values are:
3427 Possible source values are:
3424 * `full`: add from a full text (default).
3428 * `full`: add from a full text (default).
3425 * `parent-1`: add from a delta to the first parent
3429 * `parent-1`: add from a delta to the first parent
3426 * `parent-2`: add from a delta to the second parent if it exists
3430 * `parent-2`: add from a delta to the second parent if it exists
3427 (use a delta from the first parent otherwise)
3431 (use a delta from the first parent otherwise)
3428 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3432 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3429 * `storage`: add from the existing precomputed deltas
3433 * `storage`: add from the existing precomputed deltas
3430
3434
3431 Note: This performance command measures performance in a custom way. As a
3435 Note: This performance command measures performance in a custom way. As a
3432 result some of the global configuration of the 'perf' command does not
3436 result some of the global configuration of the 'perf' command does not
3433 apply to it:
3437 apply to it:
3434
3438
3435 * ``pre-run``: disabled
3439 * ``pre-run``: disabled
3436
3440
3437 * ``profile-benchmark``: disabled
3441 * ``profile-benchmark``: disabled
3438
3442
3439 * ``run-limits``: disabled use --count instead
3443 * ``run-limits``: disabled use --count instead
3440 """
3444 """
3441 opts = _byteskwargs(opts)
3445 opts = _byteskwargs(opts)
3442
3446
3443 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3447 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3444 rllen = getlen(ui)(rl)
3448 rllen = getlen(ui)(rl)
3445 if startrev < 0:
3449 if startrev < 0:
3446 startrev = rllen + startrev
3450 startrev = rllen + startrev
3447 if stoprev < 0:
3451 if stoprev < 0:
3448 stoprev = rllen + stoprev
3452 stoprev = rllen + stoprev
3449
3453
3450 lazydeltabase = opts['lazydeltabase']
3454 lazydeltabase = opts['lazydeltabase']
3451 source = opts['source']
3455 source = opts['source']
3452 clearcaches = opts['clear_caches']
3456 clearcaches = opts['clear_caches']
3453 validsource = (
3457 validsource = (
3454 b'full',
3458 b'full',
3455 b'parent-1',
3459 b'parent-1',
3456 b'parent-2',
3460 b'parent-2',
3457 b'parent-smallest',
3461 b'parent-smallest',
3458 b'storage',
3462 b'storage',
3459 )
3463 )
3460 if source not in validsource:
3464 if source not in validsource:
3461 raise error.Abort('invalid source type: %s' % source)
3465 raise error.Abort('invalid source type: %s' % source)
3462
3466
3463 ### actually gather results
3467 ### actually gather results
3464 count = opts['count']
3468 count = opts['count']
3465 if count <= 0:
3469 if count <= 0:
3466 raise error.Abort('invalide run count: %d' % count)
3470 raise error.Abort('invalide run count: %d' % count)
3467 allresults = []
3471 allresults = []
3468 for c in range(count):
3472 for c in range(count):
3469 timing = _timeonewrite(
3473 timing = _timeonewrite(
3470 ui,
3474 ui,
3471 rl,
3475 rl,
3472 source,
3476 source,
3473 startrev,
3477 startrev,
3474 stoprev,
3478 stoprev,
3475 c + 1,
3479 c + 1,
3476 lazydeltabase=lazydeltabase,
3480 lazydeltabase=lazydeltabase,
3477 clearcaches=clearcaches,
3481 clearcaches=clearcaches,
3478 )
3482 )
3479 allresults.append(timing)
3483 allresults.append(timing)
3480
3484
3481 ### consolidate the results in a single list
3485 ### consolidate the results in a single list
3482 results = []
3486 results = []
3483 for idx, (rev, t) in enumerate(allresults[0]):
3487 for idx, (rev, t) in enumerate(allresults[0]):
3484 ts = [t]
3488 ts = [t]
3485 for other in allresults[1:]:
3489 for other in allresults[1:]:
3486 orev, ot = other[idx]
3490 orev, ot = other[idx]
3487 assert orev == rev
3491 assert orev == rev
3488 ts.append(ot)
3492 ts.append(ot)
3489 results.append((rev, ts))
3493 results.append((rev, ts))
3490 resultcount = len(results)
3494 resultcount = len(results)
3491
3495
3492 ### Compute and display relevant statistics
3496 ### Compute and display relevant statistics
3493
3497
3494 # get a formatter
3498 # get a formatter
3495 fm = ui.formatter(b'perf', opts)
3499 fm = ui.formatter(b'perf', opts)
3496 displayall = ui.configbool(b"perf", b"all-timing", True)
3500 displayall = ui.configbool(b"perf", b"all-timing", True)
3497
3501
3498 # print individual details if requested
3502 # print individual details if requested
3499 if opts['details']:
3503 if opts['details']:
3500 for idx, item in enumerate(results, 1):
3504 for idx, item in enumerate(results, 1):
3501 rev, data = item
3505 rev, data = item
3502 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3506 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3503 formatone(fm, data, title=title, displayall=displayall)
3507 formatone(fm, data, title=title, displayall=displayall)
3504
3508
3505 # sorts results by median time
3509 # sorts results by median time
3506 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3510 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3507 # list of (name, index) to display)
3511 # list of (name, index) to display)
3508 relevants = [
3512 relevants = [
3509 ("min", 0),
3513 ("min", 0),
3510 ("10%", resultcount * 10 // 100),
3514 ("10%", resultcount * 10 // 100),
3511 ("25%", resultcount * 25 // 100),
3515 ("25%", resultcount * 25 // 100),
3512 ("50%", resultcount * 70 // 100),
3516 ("50%", resultcount * 70 // 100),
3513 ("75%", resultcount * 75 // 100),
3517 ("75%", resultcount * 75 // 100),
3514 ("90%", resultcount * 90 // 100),
3518 ("90%", resultcount * 90 // 100),
3515 ("95%", resultcount * 95 // 100),
3519 ("95%", resultcount * 95 // 100),
3516 ("99%", resultcount * 99 // 100),
3520 ("99%", resultcount * 99 // 100),
3517 ("99.9%", resultcount * 999 // 1000),
3521 ("99.9%", resultcount * 999 // 1000),
3518 ("99.99%", resultcount * 9999 // 10000),
3522 ("99.99%", resultcount * 9999 // 10000),
3519 ("99.999%", resultcount * 99999 // 100000),
3523 ("99.999%", resultcount * 99999 // 100000),
3520 ("max", -1),
3524 ("max", -1),
3521 ]
3525 ]
3522 if not ui.quiet:
3526 if not ui.quiet:
3523 for name, idx in relevants:
3527 for name, idx in relevants:
3524 data = results[idx]
3528 data = results[idx]
3525 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3529 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3526 formatone(fm, data[1], title=title, displayall=displayall)
3530 formatone(fm, data[1], title=title, displayall=displayall)
3527
3531
3528 # XXX summing that many float will not be very precise, we ignore this fact
3532 # XXX summing that many float will not be very precise, we ignore this fact
3529 # for now
3533 # for now
3530 totaltime = []
3534 totaltime = []
3531 for item in allresults:
3535 for item in allresults:
3532 totaltime.append(
3536 totaltime.append(
3533 (
3537 (
3534 sum(x[1][0] for x in item),
3538 sum(x[1][0] for x in item),
3535 sum(x[1][1] for x in item),
3539 sum(x[1][1] for x in item),
3536 sum(x[1][2] for x in item),
3540 sum(x[1][2] for x in item),
3537 )
3541 )
3538 )
3542 )
3539 formatone(
3543 formatone(
3540 fm,
3544 fm,
3541 totaltime,
3545 totaltime,
3542 title="total time (%d revs)" % resultcount,
3546 title="total time (%d revs)" % resultcount,
3543 displayall=displayall,
3547 displayall=displayall,
3544 )
3548 )
3545 fm.end()
3549 fm.end()
3546
3550
3547
3551
3548 class _faketr:
3552 class _faketr:
3549 def add(s, x, y, z=None):
3553 def add(s, x, y, z=None):
3550 return None
3554 return None
3551
3555
3552
3556
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions ``startrev..stoprev`` of ``orig`` into a temporary
    revlog and time each individual ``addrawrevision`` call.

    ``source`` selects how each revision's payload is produced (fulltext or
    a delta; see ``_getrevisionseed``).  ``runidx`` only affects the
    progress topic label.  Returns a list of ``(rev, timing)`` pairs, where
    ``timing`` is the measurement tuple produced by ``timeone``.
    """
    timings = []
    # A fake transaction is enough: we only measure write time, we do not
    # need real journal bookkeeping.
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # Newer revlogs expose the lazy-delta-base knob on ``delta_config``;
        # older ones use a private attribute directly.
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Build the payload *outside* the timed section so that only the
            # write itself is measured.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3605
3609
3606
3610
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair used to replay ``rev`` of ``orig``
    through ``addrawrevision``.

    Depending on ``source`` the revision content is supplied either as a
    fulltext (``text``) or as a cached delta against some base revision
    (``cachedelta``); exactly one of the two is non-None for a valid
    ``source`` value.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    parent1, parent2 = orig.parents(node)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(parent1), orig.revdiff(parent1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = parent1 if parent2 == nullid else parent2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        base = parent1
        diff = orig.revdiff(parent1, rev)
        if parent2 != nullid:
            otherdiff = orig.revdiff(parent2, rev)
            if len(diff) > len(otherdiff):
                base = parent2
                diff = otherdiff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse whatever delta base the existing storage picked
        base = orig.deltaparent(rev)
        cachedelta = (base, orig.revdiff(orig.node(base), rev))

    return (
        (text, tr, orig.linkrev(rev), parent1, parent2),
        {'node': node, 'flags': orig.flags(rev), 'cachedelta': cachedelta},
    )
3647
3651
3648
3652
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog ``orig`` truncated
    just before ``truncaterev``.

    The original index/data files are copied into a temporary directory,
    truncated so that revisions >= ``truncaterev`` are missing, and a new
    revlog is instantiated on top of the copies.  The temporary directory is
    always removed on exit.  Inline revlogs are rejected.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward the compression upper bound setting when the revlog has one
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # modern attribute first, historical public attribute as fallback
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry has a fixed size, so the cut point is a
            # simple multiplication
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern signature (radix-based file naming)
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older signature with explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3709
3713
3710
3714
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # Resolve the segment-reading entry point across revlog API versions:
    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # Default to every available engine that can actually compress.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        # Yield a file object when the old file-handle based API is in use,
        # or None when the modern ``reading()`` context is available.
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            # inline revlogs keep their data in the index file
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # BUGFIX: the first lookup previously queried 'datafile' twice,
            # which defeated the modern/legacy fallback; prefer the modern
            # '_datafile' attribute (same pattern as _temprevlog).
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        # one segment read per revision, reopening lazily each time
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        # one segment read per revision, reusing a single file handle
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        # a single read covering the whole revision span
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        # Recompress the chunks captured by dochunkbatch with ``compressor``.
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3886
3890
3887
3891
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional FILE slot actually carries the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = r._getsegmentforrevs
        except AttributeError:
            segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    # Modern revlogs require an explicit reading context; older ones do not.
    if getattr(r, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(r):
            with r.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(r):
            yield

    def getrawchunks(data, chain):
        # Slice the already-read segments (``data``) back into one raw
        # chunk per revision of the (sliced) delta chain.
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog index API
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # Each do* helper isolates one phase; ``cache`` keeps caches warm
    # between timer iterations instead of clearing them.
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            with lazy_reading(r):
                segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved into revlogutils.deltas at some point
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]

    # sparse-read support lives on data_config in modern revlogs, on a
    # private attribute in older ones
    with_sparse_read = False
    if hasattr(r, 'data_config'):
        with_sparse_read = r.data_config.with_sparse_read
    elif hasattr(r, '_withsparseread'):
        with_sparse_read = r._withsparseread
    if with_sparse_read:
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # Precompute the inputs of each phase once so the timed helpers only
    # measure their own phase.
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._inner._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if with_sparse_read:
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
4055
4059
4056
4060
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of build
    volatile revisions set cache on the revset execution. Volatile cache
    holds filtered and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # optionally drop the volatile caches so each run pays the
        # filtering/obsolescence computation cost again
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx per revision (heavier path)
            for ctx in repo.set(expr):
                pass
        else:
            # plain revision numbers only
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
4088
4092
4089
4093
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # volatile sets are computed on the unfiltered repository
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark closure for one obsolescence-related set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
4137
4141
4138
4142
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # only drop the entry for this filter so the subset caches
                # remain warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading/writing so the benchmark measures
    # only the in-memory computation; the exact hook point depends on the
    # Mercurial version, hence the attribute probing below
    old_branch_cache_from_file = None
    branchcacheread = None
    if util.safehasattr(branchmap, 'branch_cache_from_file'):
        old_branch_cache_from_file = branchmap.branch_cache_from_file
        branchmap.branch_cache_from_file = lambda *args: None
    elif util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    if util.safehasattr(branchmap, '_LocalBranchCache'):
        branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
        branchcachewrite.set(lambda *args: None)
    else:
        branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
        branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write entry points
        if old_branch_cache_from_file is not None:
            branchmap.branch_cache_from_file = old_branch_cache_from_file
        if branchcacheread is not None:
            branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4240
4244
4241
4245
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two temporary repoview filters matching the base and
        # target subsets; they are removed again in the finally clause
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        bcache = repo.branchmap()
        copy_method = 'copy'

        # BUGFIX: the original read `copy_base_kwargs = copy_base_kwargs = {}`,
        # leaving `copy_target_kwargs` undefined (NameError in setup()) when
        # `bcache.copy` exists but does not take a `repo` argument.
        copy_base_kwargs = copy_target_kwargs = {}
        if hasattr(bcache, 'copy'):
            if 'repo' in getargspec(bcache.copy).args:
                copy_base_kwargs = {"repo": baserepo}
                copy_target_kwargs = {"repo": targetrepo}
        else:
            # newer API: `inherit_for` replaces `copy` and requires a repo
            copy_method = 'inherit_for'
            copy_base_kwargs = {"repo": baserepo}
            copy_target_kwargs = {"repo": targetrepo}

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = getattr(base, copy_method)(**copy_target_kwargs)
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4363
4367
4364
4368
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # typo fix: was "brachmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # pick the branchmap-from-disk entry point for this Mercurial version
    fromfile = getattr(branchmap, 'branch_cache_from_file', None)
    if fromfile is None:
        fromfile = getattr(branchmap.branchcache, 'fromfile', None)
    if fromfile is None:
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4423
4427
4424
4428
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # instantiating obsstore parses the on-disk markers; len() forces it
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()
4434
4438
4435
4439
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark the util.lrucachedict implementation

    Measures init, get, insert/set and mixed workloads, with or without a
    total cost limit depending on --costlimit."""
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted this key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4590
4594
4591
4595
4592 @command(
4596 @command(
4593 b'perf::write|perfwrite',
4597 b'perf::write|perfwrite',
4594 formatteropts
4598 formatteropts
4595 + [
4599 + [
4596 (b'', b'write-method', b'write', b'ui write method'),
4600 (b'', b'write-method', b'write', b'ui write method'),
4597 (b'', b'nlines', 100, b'number of lines'),
4601 (b'', b'nlines', 100, b'number of lines'),
4598 (b'', b'nitems', 100, b'number of items (per line)'),
4602 (b'', b'nitems', 100, b'number of items (per line)'),
4599 (b'', b'item', b'x', b'item that is written'),
4603 (b'', b'item', b'x', b'item that is written'),
4600 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4604 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4601 (b'', b'flush-line', None, b'flush after each line'),
4605 (b'', b'flush-line', None, b'flush after each line'),
4602 ],
4606 ],
4603 )
4607 )
4604 def perfwrite(ui, repo, **opts):
4608 def perfwrite(ui, repo, **opts):
4605 """microbenchmark ui.write (and others)"""
4609 """microbenchmark ui.write (and others)"""
4606 opts = _byteskwargs(opts)
4610 opts = _byteskwargs(opts)
4607
4611
4608 write = getattr(ui, _sysstr(opts[b'write_method']))
4612 write = getattr(ui, _sysstr(opts[b'write_method']))
4609 nlines = int(opts[b'nlines'])
4613 nlines = int(opts[b'nlines'])
4610 nitems = int(opts[b'nitems'])
4614 nitems = int(opts[b'nitems'])
4611 item = opts[b'item']
4615 item = opts[b'item']
4612 batch_line = opts.get(b'batch_line')
4616 batch_line = opts.get(b'batch_line')
4613 flush_line = opts.get(b'flush_line')
4617 flush_line = opts.get(b'flush_line')
4614
4618
4615 if batch_line:
4619 if batch_line:
4616 line = item * nitems + b'\n'
4620 line = item * nitems + b'\n'
4617
4621
4618 def benchmark():
4622 def benchmark():
4619 for i in pycompat.xrange(nlines):
4623 for i in pycompat.xrange(nlines):
4620 if batch_line:
4624 if batch_line:
4621 write(line)
4625 write(line)
4622 else:
4626 else:
4623 for i in pycompat.xrange(nitems):
4627 for i in pycompat.xrange(nitems):
4624 write(item)
4628 write(item)
4625 write(b'\n')
4629 write(b'\n')
4626 if flush_line:
4630 if flush_line:
4627 ui.flush()
4631 ui.flush()
4628 ui.flush()
4632 ui.flush()
4629
4633
4630 timer, fm = gettimer(ui, opts)
4634 timer, fm = gettimer(ui, opts)
4631 timer(benchmark)
4635 timer(benchmark)
4632 fm.end()
4636 fm.end()
4633
4637
4634
4638
4635 def uisetup(ui):
4639 def uisetup(ui):
4636 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4640 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4637 commands, b'debugrevlogopts'
4641 commands, b'debugrevlogopts'
4638 ):
4642 ):
4639 # for "historical portability":
4643 # for "historical portability":
4640 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4644 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4641 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4645 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4642 # openrevlog() should cause failure, because it has been
4646 # openrevlog() should cause failure, because it has been
4643 # available since 3.5 (or 49c583ca48c4).
4647 # available since 3.5 (or 49c583ca48c4).
4644 def openrevlog(orig, repo, cmd, file_, opts):
4648 def openrevlog(orig, repo, cmd, file_, opts):
4645 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4649 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4646 raise error.Abort(
4650 raise error.Abort(
4647 b"This version doesn't support --dir option",
4651 b"This version doesn't support --dir option",
4648 hint=b"use 3.5 or later",
4652 hint=b"use 3.5 or later",
4649 )
4653 )
4650 return orig(repo, cmd, file_, opts)
4654 return orig(repo, cmd, file_, opts)
4651
4655
4652 name = _sysstr(b'openrevlog')
4656 name = _sysstr(b'openrevlog')
4653 extensions.wrapfunction(cmdutil, name, openrevlog)
4657 extensions.wrapfunction(cmdutil, name, openrevlog)
4654
4658
4655
4659
4656 @command(
4660 @command(
4657 b'perf::progress|perfprogress',
4661 b'perf::progress|perfprogress',
4658 formatteropts
4662 formatteropts
4659 + [
4663 + [
4660 (b'', b'topic', b'topic', b'topic for progress messages'),
4664 (b'', b'topic', b'topic', b'topic for progress messages'),
4661 (b'c', b'total', 1000000, b'total value we are progressing to'),
4665 (b'c', b'total', 1000000, b'total value we are progressing to'),
4662 ],
4666 ],
4663 norepo=True,
4667 norepo=True,
4664 )
4668 )
4665 def perfprogress(ui, topic=None, total=None, **opts):
4669 def perfprogress(ui, topic=None, total=None, **opts):
4666 """printing of progress bars"""
4670 """printing of progress bars"""
4667 opts = _byteskwargs(opts)
4671 opts = _byteskwargs(opts)
4668
4672
4669 timer, fm = gettimer(ui, opts)
4673 timer, fm = gettimer(ui, opts)
4670
4674
4671 def doprogress():
4675 def doprogress():
4672 with ui.makeprogress(topic, total=total) as progress:
4676 with ui.makeprogress(topic, total=total) as progress:
4673 for i in _xrange(total):
4677 for i in _xrange(total):
4674 progress.increment()
4678 progress.increment()
4675
4679
4676 timer(doprogress)
4680 timer(doprogress)
4677 fm.end()
4681 fm.end()
General Comments 0
You need to be logged in to leave comments. Login now