##// END OF EJS Templates
perf: add a --as-push option to perf::unbundle...
marmoute -
r52329:827b8971 default
parent child Browse files
Show More
@@ -1,4638 +1,4650 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged; no-op fallback for missing pycompat helpers."""
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
# sentinel distinguishing "attribute missing" from any real attribute value
_undefined = object()


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (*attr* given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


# forcibly (re)define util.safehasattr so the rest of this file can rely on it
# even on Mercurial versions that predate it (available since 1.9.3)
setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238
238
239 # for "historical portability":
239 # for "historical portability":
240 # define parsealiases locally, because cmdutil.parsealiases has been
240 # define parsealiases locally, because cmdutil.parsealiases has been
241 # available since 1.5 (or 6252852b4332)
241 # available since 1.5 (or 6252852b4332)
242 def parsealiases(cmd):
242 def parsealiases(cmd):
243 return cmd.split(b"|")
243 return cmd.split(b"|")
244
244
245
245
# Pick a @command decorator implementation, newest-first:
#   1. registrar.command (modern API)
#   2. cmdutil.command, wrapped if it predates the "norepo" keyword
#   3. a local minimal reimplementation for very old Mercurial
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo= by appending the aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # register directly into the local cmdtable
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
277
278
278
279 try:
279 try:
280 import mercurial.registrar
280 import mercurial.registrar
281 import mercurial.configitems
281 import mercurial.configitems
282
282
283 configtable = {}
283 configtable = {}
284 configitem = mercurial.registrar.configitem(configtable)
284 configitem = mercurial.registrar.configitem(configtable)
285 configitem(
285 configitem(
286 b'perf',
286 b'perf',
287 b'presleep',
287 b'presleep',
288 default=mercurial.configitems.dynamicdefault,
288 default=mercurial.configitems.dynamicdefault,
289 experimental=True,
289 experimental=True,
290 )
290 )
291 configitem(
291 configitem(
292 b'perf',
292 b'perf',
293 b'stub',
293 b'stub',
294 default=mercurial.configitems.dynamicdefault,
294 default=mercurial.configitems.dynamicdefault,
295 experimental=True,
295 experimental=True,
296 )
296 )
297 configitem(
297 configitem(
298 b'perf',
298 b'perf',
299 b'parentscount',
299 b'parentscount',
300 default=mercurial.configitems.dynamicdefault,
300 default=mercurial.configitems.dynamicdefault,
301 experimental=True,
301 experimental=True,
302 )
302 )
303 configitem(
303 configitem(
304 b'perf',
304 b'perf',
305 b'all-timing',
305 b'all-timing',
306 default=mercurial.configitems.dynamicdefault,
306 default=mercurial.configitems.dynamicdefault,
307 experimental=True,
307 experimental=True,
308 )
308 )
309 configitem(
309 configitem(
310 b'perf',
310 b'perf',
311 b'pre-run',
311 b'pre-run',
312 default=mercurial.configitems.dynamicdefault,
312 default=mercurial.configitems.dynamicdefault,
313 )
313 )
314 configitem(
314 configitem(
315 b'perf',
315 b'perf',
316 b'profile-benchmark',
316 b'profile-benchmark',
317 default=mercurial.configitems.dynamicdefault,
317 default=mercurial.configitems.dynamicdefault,
318 )
318 )
319 configitem(
319 configitem(
320 b'perf',
320 b'perf',
321 b'run-limits',
321 b'run-limits',
322 default=mercurial.configitems.dynamicdefault,
322 default=mercurial.configitems.dynamicdefault,
323 experimental=True,
323 experimental=True,
324 )
324 )
325 except (ImportError, AttributeError):
325 except (ImportError, AttributeError):
326 pass
326 pass
327 except TypeError:
327 except TypeError:
328 # compatibility fix for a11fd395e83f
328 # compatibility fix for a11fd395e83f
329 # hg version: 5.2
329 # hg version: 5.2
330 configitem(
330 configitem(
331 b'perf',
331 b'perf',
332 b'presleep',
332 b'presleep',
333 default=mercurial.configitems.dynamicdefault,
333 default=mercurial.configitems.dynamicdefault,
334 )
334 )
335 configitem(
335 configitem(
336 b'perf',
336 b'perf',
337 b'stub',
337 b'stub',
338 default=mercurial.configitems.dynamicdefault,
338 default=mercurial.configitems.dynamicdefault,
339 )
339 )
340 configitem(
340 configitem(
341 b'perf',
341 b'perf',
342 b'parentscount',
342 b'parentscount',
343 default=mercurial.configitems.dynamicdefault,
343 default=mercurial.configitems.dynamicdefault,
344 )
344 )
345 configitem(
345 configitem(
346 b'perf',
346 b'perf',
347 b'all-timing',
347 b'all-timing',
348 default=mercurial.configitems.dynamicdefault,
348 default=mercurial.configitems.dynamicdefault,
349 )
349 )
350 configitem(
350 configitem(
351 b'perf',
351 b'perf',
352 b'pre-run',
352 b'pre-run',
353 default=mercurial.configitems.dynamicdefault,
353 default=mercurial.configitems.dynamicdefault,
354 )
354 )
355 configitem(
355 configitem(
356 b'perf',
356 b'perf',
357 b'profile-benchmark',
357 b'profile-benchmark',
358 default=mercurial.configitems.dynamicdefault,
358 default=mercurial.configitems.dynamicdefault,
359 )
359 )
360 configitem(
360 configitem(
361 b'perf',
361 b'perf',
362 b'run-limits',
362 b'run-limits',
363 default=mercurial.configitems.dynamicdefault,
363 default=mercurial.configitems.dynamicdefault,
364 )
364 )
365
365
366
366
def getlen(ui):
    """Return a length function; a constant-1 stub when perf.stub is set.

    In stub mode benchmarks iterate only once, so any sequence is treated
    as having a single element.
    """
    stub_mode = ui.configbool(b"perf", b"stub", False)
    if not stub_mode:
        return len
    return lambda seq: 1
371
371
372
372
class noop:
    """No-op context manager, used where profiling is disabled."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        # returning None (falsy) means exceptions propagate normally
        return None


# the class is stateless, so a single shared instance suffices
NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # falsy: callers test the formatter to detect plain output
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    # NOTE(review): the module docstring says all-timing defaults to off,
    # but the fallback here is True — confirm which is intended.
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<runcount>'; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark (first iteration only)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Execute *func* a single time instead of benchmarking it in a loop.

    *fm* and *title* are unused; they exist so this function can stand in
    for ``_timer`` when perf.stub is enabled (see ``gettimer``).
    """
    run_setup = setup is not None
    if run_setup:
        setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Time one run; yields a list that receives one (wall, user, sys) tuple.

    The caller reads the appended tuple after the with-block exits.
    """
    r = []
    ostart = os.times()  # CPU (user/sys) times before the run
    cstart = util.timer()  # wall clock taken last, closest to the run
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # (wall-clock elapsed, user CPU delta, system CPU delta)
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526
526
527
527
# list of stop condition (elapsed time, minimal run count)
# Checked in order by _timer: stop once 3s have elapsed AND >=100 runs
# completed, otherwise once 10s have elapsed AND >=3 runs completed.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
533
533
534
534
@contextlib.contextmanager
def noop_context():
    """Context manager that does nothing; default *context* for ``_timer``."""
    yield None
538
538
539
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* repeatedly, then report timings through *fm*.

    *setup* (if given) runs before every iteration, outside the timed
    section; *context* wraps each call.  *prerun* untimed warm-up runs
    happen first.  The loop stops at the first (elapsed-seconds, min-count)
    pair in *limits* whose both conditions hold.  When *profiler* is
    given, only the first measured iteration is profiled.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations: neither timed nor profiled
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # swap in the no-op so only the first iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the last run's return value; shown as "! result: ..." if truthy
    formatone(fm, results, title=title, result=r, displayall=displayall)
582
582
583
583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples and is sorted in
    place.  Only the best run is shown unless *displayall* is true, in
    which case max/avg/median rows are printed as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # every role except b'best' gets a "role." field-name prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user_t, sys_t = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user_t + sys_t)
        fm.write(prefix + b'user', b' user %f', user_t)
        fm.write(prefix + b'sys', b' sys %f', sys_t)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[count // 2])
616
616
617
617
618 # utilities for historical portability
618 # utilities for historical portability
619
619
620
620
def getint(ui, section, name, default):
    """Read config item ``section.name`` as an int, or return ``default``.

    Kept local for "historical portability": ui.configint has only been
    available since 1.9 (or fa2b596db182).
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure 'obj' has attribute 'name' before handing back a setter.

    Aborts if 'obj' lacks 'name' at runtime, so that silent removal of an
    attribute relied on by a benchmark cannot go unnoticed in the future.

    The returned object can (1) assign a new value to the attribute and
    (2) restore the value the attribute had when this function was called.

    If 'ignoremissing' is true, a missing attribute returns None instead
    of aborting; useful for attributes not present in every Mercurial
    version.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    saved = getattr(obj, _sysstr(name))

    class _accessor:
        # setter/restorer pair bound to (obj, name, saved)
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return _accessor()
671
671
672
672
673 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
674
674
675
675
def getbranchmapsubsettable():
    """Locate the branchmap ``subsettable`` across Mercurial versions.

    For "historical portability", subsettable has lived in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    # pre-2.3 name for the store opener
    return getattr(repo, 'sopener')
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    # pre-2.3 name for the working-directory opener
    return getattr(repo, 'opener')
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
748 # utilities to clear cache
748 # utilities to clear cache
749
749
750
750
def clearfilecache(obj, attrname):
    """Drop the @filecache'd property ``attrname`` from ``obj``.

    Works on the unfiltered repository when ``obj`` is a repoview, since
    filecache entries live on the unfiltered object.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    """Force the changelog of ``repo`` to be reloaded on next access."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # filtered repos keep their own cached changelog reference
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
765
765
766
766
767 # perf commands
767 # perf commands
768
768
769
769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """Benchmark a dirstate walk over the working directory."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk_once():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walked))

    timer(walk_once)
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """Benchmark annotating file ``f`` at the working parent."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if not opts[b'dirstate']:
        # repository-level status (the common user-visible path)
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    else:
        # lower-level dirstate status call
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            st = dirstate.status(
                matcher, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, st))

        if util.safehasattr(dirstate, 'running_status'):
            # newer API: run under the running_status context and drop any
            # state the benchmarked calls accumulated
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """Benchmark a dry-run addremove over the whole working directory."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # silence per-file output during the runs
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never mutate the dirstate
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # modern signature takes a ui path formatter
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    """Drop in-memory caches of a changelog/revlog.

    Behaves somewhat consistently across internal API changes.
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2: reset the node cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    # setup drops the caches so each run recomputes heads from scratch
    def setup():
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
883
883
884
884
def _default_clear_on_disk_tags_cache(repo):
    """Fallback used when ``tags`` has no ``clear_cache_on_disk`` helper."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
889
889
890
890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Fallback used when ``tags`` has no working ``clear_cache_fnodes``."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
895
895
896
896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # overwrite the selected records with the "missing" markers
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for rev in revs:
        cache._writeentry(rev * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()
908
908
909
909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clear_revlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']
    clear_fnode_revs = opts[b'clear_fnode_cache_rev']

    # --update-last takes a textual N; reject anything non-integer early
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # resolve cache-clearing helpers, falling back to the local defaults
    # when this Mercurial does not expose them
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        # pretend the last N revisions just arrived: warm a tags cache for
        # the repository *without* them, and restore that file before each
        # run so every run re-does the incremental update
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def setup():
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clear_revlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def run():
        len(repo.tags())

    timer(run, setup=setup)
    fm.end()
1028
1028
1029
1029
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """Benchmark iterating all ancestors of the changelog heads."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        # exhaust the ancestor iterator; the values themselves are unused
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
1042
1042
1043
1043
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """Benchmark membership tests of REVSET against the ancestor lazy set."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors  # membership test is the benchmarked operation

    timer(run)
    fm.end()
1058
1058
1059
1059
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with one positional argument it is the revision of -c/-m;
    # with two, the first names the file whose revlog to use
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that the delta search would receive for
    # this (already stored) revision
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1123
1123
1124
1124
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve the peer path with whichever API this Mercurial provides,
    # newest first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def setup():
        # reconnect before each run so connection setup is not measured
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup)
    fm.end()
1151
1151
1152
1152
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clear_revlogs = opts[b'clear_revlogs']

    def setup():
        if clear_revlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1177
1177
1178
1178
1179 @command(
1179 @command(
1180 b'perf::bundle',
1180 b'perf::bundle',
1181 [
1181 [
1182 (
1182 (
1183 b'r',
1183 b'r',
1184 b'rev',
1184 b'rev',
1185 [],
1185 [],
1186 b'changesets to bundle',
1186 b'changesets to bundle',
1187 b'REV',
1187 b'REV',
1188 ),
1188 ),
1189 (
1189 (
1190 b't',
1190 b't',
1191 b'type',
1191 b'type',
1192 b'none',
1192 b'none',
1193 b'bundlespec to use (see `hg help bundlespec`)',
1193 b'bundlespec to use (see `hg help bundlespec`)',
1194 b'TYPE',
1194 b'TYPE',
1195 ),
1195 ),
1196 ]
1196 ]
1197 + formatteropts,
1197 + formatteropts,
1198 b'REVS',
1198 b'REVS',
1199 )
1199 )
1200 def perfbundle(ui, repo, *revs, **opts):
1200 def perfbundle(ui, repo, *revs, **opts):
1201 """benchmark the creation of a bundle from a repository
1201 """benchmark the creation of a bundle from a repository
1202
1202
1203 For now, this only supports "none" compression.
1203 For now, this only supports "none" compression.
1204 """
1204 """
1205 try:
1205 try:
1206 from mercurial import bundlecaches
1206 from mercurial import bundlecaches
1207
1207
1208 parsebundlespec = bundlecaches.parsebundlespec
1208 parsebundlespec = bundlecaches.parsebundlespec
1209 except ImportError:
1209 except ImportError:
1210 from mercurial import exchange
1210 from mercurial import exchange
1211
1211
1212 parsebundlespec = exchange.parsebundlespec
1212 parsebundlespec = exchange.parsebundlespec
1213
1213
1214 from mercurial import discovery
1214 from mercurial import discovery
1215 from mercurial import bundle2
1215 from mercurial import bundle2
1216
1216
1217 opts = _byteskwargs(opts)
1217 opts = _byteskwargs(opts)
1218 timer, fm = gettimer(ui, opts)
1218 timer, fm = gettimer(ui, opts)
1219
1219
1220 cl = repo.changelog
1220 cl = repo.changelog
1221 revs = list(revs)
1221 revs = list(revs)
1222 revs.extend(opts.get(b'rev', ()))
1222 revs.extend(opts.get(b'rev', ()))
1223 revs = scmutil.revrange(repo, revs)
1223 revs = scmutil.revrange(repo, revs)
1224 if not revs:
1224 if not revs:
1225 raise error.Abort(b"not revision specified")
1225 raise error.Abort(b"not revision specified")
1226 # make it a consistent set (ie: without topological gaps)
1226 # make it a consistent set (ie: without topological gaps)
1227 old_len = len(revs)
1227 old_len = len(revs)
1228 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1228 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1229 if old_len != len(revs):
1229 if old_len != len(revs):
1230 new_count = len(revs) - old_len
1230 new_count = len(revs) - old_len
1231 msg = b"add %d new revisions to make it a consistent set\n"
1231 msg = b"add %d new revisions to make it a consistent set\n"
1232 ui.write_err(msg % new_count)
1232 ui.write_err(msg % new_count)
1233
1233
1234 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1234 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1235 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1235 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1236 outgoing = discovery.outgoing(repo, bases, targets)
1236 outgoing = discovery.outgoing(repo, bases, targets)
1237
1237
1238 bundle_spec = opts.get(b'type')
1238 bundle_spec = opts.get(b'type')
1239
1239
1240 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1240 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1241
1241
1242 cgversion = bundle_spec.params.get(b"cg.version")
1242 cgversion = bundle_spec.params.get(b"cg.version")
1243 if cgversion is None:
1243 if cgversion is None:
1244 if bundle_spec.version == b'v1':
1244 if bundle_spec.version == b'v1':
1245 cgversion = b'01'
1245 cgversion = b'01'
1246 if bundle_spec.version == b'v2':
1246 if bundle_spec.version == b'v2':
1247 cgversion = b'02'
1247 cgversion = b'02'
1248 if cgversion not in changegroup.supportedoutgoingversions(repo):
1248 if cgversion not in changegroup.supportedoutgoingversions(repo):
1249 err = b"repository does not support bundle version %s"
1249 err = b"repository does not support bundle version %s"
1250 raise error.Abort(err % cgversion)
1250 raise error.Abort(err % cgversion)
1251
1251
1252 if cgversion == b'01': # bundle1
1252 if cgversion == b'01': # bundle1
1253 bversion = b'HG10' + bundle_spec.wirecompression
1253 bversion = b'HG10' + bundle_spec.wirecompression
1254 bcompression = None
1254 bcompression = None
1255 elif cgversion in (b'02', b'03'):
1255 elif cgversion in (b'02', b'03'):
1256 bversion = b'HG20'
1256 bversion = b'HG20'
1257 bcompression = bundle_spec.wirecompression
1257 bcompression = bundle_spec.wirecompression
1258 else:
1258 else:
1259 err = b'perf::bundle: unexpected changegroup version %s'
1259 err = b'perf::bundle: unexpected changegroup version %s'
1260 raise error.ProgrammingError(err % cgversion)
1260 raise error.ProgrammingError(err % cgversion)
1261
1261
1262 if bcompression is None:
1262 if bcompression is None:
1263 bcompression = b'UN'
1263 bcompression = b'UN'
1264
1264
1265 if bcompression != b'UN':
1265 if bcompression != b'UN':
1266 err = b'perf::bundle: compression currently unsupported: %s'
1266 err = b'perf::bundle: compression currently unsupported: %s'
1267 raise error.ProgrammingError(err % bcompression)
1267 raise error.ProgrammingError(err % bcompression)
1268
1268
1269 def do_bundle():
1269 def do_bundle():
1270 bundle2.writenewbundle(
1270 bundle2.writenewbundle(
1271 ui,
1271 ui,
1272 repo,
1272 repo,
1273 b'perf::bundle',
1273 b'perf::bundle',
1274 os.devnull,
1274 os.devnull,
1275 bversion,
1275 bversion,
1276 outgoing,
1276 outgoing,
1277 bundle_spec.params,
1277 bundle_spec.params,
1278 )
1278 )
1279
1279
1280 timer(do_bundle)
1280 timer(do_bundle)
1281 fm.end()
1281 fm.end()
1282
1282
1283
1283
1284 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1284 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1285 def perfbundleread(ui, repo, bundlepath, **opts):
1285 def perfbundleread(ui, repo, bundlepath, **opts):
1286 """Benchmark reading of bundle files.
1286 """Benchmark reading of bundle files.
1287
1287
1288 This command is meant to isolate the I/O part of bundle reading as
1288 This command is meant to isolate the I/O part of bundle reading as
1289 much as possible.
1289 much as possible.
1290 """
1290 """
1291 from mercurial import (
1291 from mercurial import (
1292 bundle2,
1292 bundle2,
1293 exchange,
1293 exchange,
1294 streamclone,
1294 streamclone,
1295 )
1295 )
1296
1296
1297 opts = _byteskwargs(opts)
1297 opts = _byteskwargs(opts)
1298
1298
1299 def makebench(fn):
1299 def makebench(fn):
1300 def run():
1300 def run():
1301 with open(bundlepath, b'rb') as fh:
1301 with open(bundlepath, b'rb') as fh:
1302 bundle = exchange.readbundle(ui, fh, bundlepath)
1302 bundle = exchange.readbundle(ui, fh, bundlepath)
1303 fn(bundle)
1303 fn(bundle)
1304
1304
1305 return run
1305 return run
1306
1306
1307 def makereadnbytes(size):
1307 def makereadnbytes(size):
1308 def run():
1308 def run():
1309 with open(bundlepath, b'rb') as fh:
1309 with open(bundlepath, b'rb') as fh:
1310 bundle = exchange.readbundle(ui, fh, bundlepath)
1310 bundle = exchange.readbundle(ui, fh, bundlepath)
1311 while bundle.read(size):
1311 while bundle.read(size):
1312 pass
1312 pass
1313
1313
1314 return run
1314 return run
1315
1315
1316 def makestdioread(size):
1316 def makestdioread(size):
1317 def run():
1317 def run():
1318 with open(bundlepath, b'rb') as fh:
1318 with open(bundlepath, b'rb') as fh:
1319 while fh.read(size):
1319 while fh.read(size):
1320 pass
1320 pass
1321
1321
1322 return run
1322 return run
1323
1323
1324 # bundle1
1324 # bundle1
1325
1325
1326 def deltaiter(bundle):
1326 def deltaiter(bundle):
1327 for delta in bundle.deltaiter():
1327 for delta in bundle.deltaiter():
1328 pass
1328 pass
1329
1329
1330 def iterchunks(bundle):
1330 def iterchunks(bundle):
1331 for chunk in bundle.getchunks():
1331 for chunk in bundle.getchunks():
1332 pass
1332 pass
1333
1333
1334 # bundle2
1334 # bundle2
1335
1335
1336 def forwardchunks(bundle):
1336 def forwardchunks(bundle):
1337 for chunk in bundle._forwardchunks():
1337 for chunk in bundle._forwardchunks():
1338 pass
1338 pass
1339
1339
1340 def iterparts(bundle):
1340 def iterparts(bundle):
1341 for part in bundle.iterparts():
1341 for part in bundle.iterparts():
1342 pass
1342 pass
1343
1343
1344 def iterpartsseekable(bundle):
1344 def iterpartsseekable(bundle):
1345 for part in bundle.iterparts(seekable=True):
1345 for part in bundle.iterparts(seekable=True):
1346 pass
1346 pass
1347
1347
1348 def seek(bundle):
1348 def seek(bundle):
1349 for part in bundle.iterparts(seekable=True):
1349 for part in bundle.iterparts(seekable=True):
1350 part.seek(0, os.SEEK_END)
1350 part.seek(0, os.SEEK_END)
1351
1351
1352 def makepartreadnbytes(size):
1352 def makepartreadnbytes(size):
1353 def run():
1353 def run():
1354 with open(bundlepath, b'rb') as fh:
1354 with open(bundlepath, b'rb') as fh:
1355 bundle = exchange.readbundle(ui, fh, bundlepath)
1355 bundle = exchange.readbundle(ui, fh, bundlepath)
1356 for part in bundle.iterparts():
1356 for part in bundle.iterparts():
1357 while part.read(size):
1357 while part.read(size):
1358 pass
1358 pass
1359
1359
1360 return run
1360 return run
1361
1361
1362 benches = [
1362 benches = [
1363 (makestdioread(8192), b'read(8k)'),
1363 (makestdioread(8192), b'read(8k)'),
1364 (makestdioread(16384), b'read(16k)'),
1364 (makestdioread(16384), b'read(16k)'),
1365 (makestdioread(32768), b'read(32k)'),
1365 (makestdioread(32768), b'read(32k)'),
1366 (makestdioread(131072), b'read(128k)'),
1366 (makestdioread(131072), b'read(128k)'),
1367 ]
1367 ]
1368
1368
1369 with open(bundlepath, b'rb') as fh:
1369 with open(bundlepath, b'rb') as fh:
1370 bundle = exchange.readbundle(ui, fh, bundlepath)
1370 bundle = exchange.readbundle(ui, fh, bundlepath)
1371
1371
1372 if isinstance(bundle, changegroup.cg1unpacker):
1372 if isinstance(bundle, changegroup.cg1unpacker):
1373 benches.extend(
1373 benches.extend(
1374 [
1374 [
1375 (makebench(deltaiter), b'cg1 deltaiter()'),
1375 (makebench(deltaiter), b'cg1 deltaiter()'),
1376 (makebench(iterchunks), b'cg1 getchunks()'),
1376 (makebench(iterchunks), b'cg1 getchunks()'),
1377 (makereadnbytes(8192), b'cg1 read(8k)'),
1377 (makereadnbytes(8192), b'cg1 read(8k)'),
1378 (makereadnbytes(16384), b'cg1 read(16k)'),
1378 (makereadnbytes(16384), b'cg1 read(16k)'),
1379 (makereadnbytes(32768), b'cg1 read(32k)'),
1379 (makereadnbytes(32768), b'cg1 read(32k)'),
1380 (makereadnbytes(131072), b'cg1 read(128k)'),
1380 (makereadnbytes(131072), b'cg1 read(128k)'),
1381 ]
1381 ]
1382 )
1382 )
1383 elif isinstance(bundle, bundle2.unbundle20):
1383 elif isinstance(bundle, bundle2.unbundle20):
1384 benches.extend(
1384 benches.extend(
1385 [
1385 [
1386 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1386 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1387 (makebench(iterparts), b'bundle2 iterparts()'),
1387 (makebench(iterparts), b'bundle2 iterparts()'),
1388 (
1388 (
1389 makebench(iterpartsseekable),
1389 makebench(iterpartsseekable),
1390 b'bundle2 iterparts() seekable',
1390 b'bundle2 iterparts() seekable',
1391 ),
1391 ),
1392 (makebench(seek), b'bundle2 part seek()'),
1392 (makebench(seek), b'bundle2 part seek()'),
1393 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1393 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1394 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1394 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1395 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1395 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1396 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1396 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1397 ]
1397 ]
1398 )
1398 )
1399 elif isinstance(bundle, streamclone.streamcloneapplier):
1399 elif isinstance(bundle, streamclone.streamcloneapplier):
1400 raise error.Abort(b'stream clone bundles not supported')
1400 raise error.Abort(b'stream clone bundles not supported')
1401 else:
1401 else:
1402 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1402 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1403
1403
1404 for fn, title in benches:
1404 for fn, title in benches:
1405 timer, fm = gettimer(ui, opts)
1405 timer, fm = gettimer(ui, opts)
1406 timer(fn, title=title)
1406 timer(fn, title=title)
1407 fm.end()
1407 fm.end()
1408
1408
1409
1409
1410 @command(
1410 @command(
1411 b'perf::changegroupchangelog|perfchangegroupchangelog',
1411 b'perf::changegroupchangelog|perfchangegroupchangelog',
1412 formatteropts
1412 formatteropts
1413 + [
1413 + [
1414 (b'', b'cgversion', b'02', b'changegroup version'),
1414 (b'', b'cgversion', b'02', b'changegroup version'),
1415 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1415 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1416 ],
1416 ],
1417 )
1417 )
1418 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1418 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1419 """Benchmark producing a changelog group for a changegroup.
1419 """Benchmark producing a changelog group for a changegroup.
1420
1420
1421 This measures the time spent processing the changelog during a
1421 This measures the time spent processing the changelog during a
1422 bundle operation. This occurs during `hg bundle` and on a server
1422 bundle operation. This occurs during `hg bundle` and on a server
1423 processing a `getbundle` wire protocol request (handles clones
1423 processing a `getbundle` wire protocol request (handles clones
1424 and pull requests).
1424 and pull requests).
1425
1425
1426 By default, all revisions are added to the changegroup.
1426 By default, all revisions are added to the changegroup.
1427 """
1427 """
1428 opts = _byteskwargs(opts)
1428 opts = _byteskwargs(opts)
1429 cl = repo.changelog
1429 cl = repo.changelog
1430 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1430 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1431 bundler = changegroup.getbundler(cgversion, repo)
1431 bundler = changegroup.getbundler(cgversion, repo)
1432
1432
1433 def d():
1433 def d():
1434 state, chunks = bundler._generatechangelog(cl, nodes)
1434 state, chunks = bundler._generatechangelog(cl, nodes)
1435 for chunk in chunks:
1435 for chunk in chunks:
1436 pass
1436 pass
1437
1437
1438 timer, fm = gettimer(ui, opts)
1438 timer, fm = gettimer(ui, opts)
1439
1439
1440 # Terminal printing can interfere with timing. So disable it.
1440 # Terminal printing can interfere with timing. So disable it.
1441 with ui.configoverride({(b'progress', b'disable'): True}):
1441 with ui.configoverride({(b'progress', b'disable'): True}):
1442 timer(d)
1442 timer(d)
1443
1443
1444 fm.end()
1444 fm.end()
1445
1445
1446
1446
1447 @command(b'perf::dirs|perfdirs', formatteropts)
1447 @command(b'perf::dirs|perfdirs', formatteropts)
1448 def perfdirs(ui, repo, **opts):
1448 def perfdirs(ui, repo, **opts):
1449 opts = _byteskwargs(opts)
1449 opts = _byteskwargs(opts)
1450 timer, fm = gettimer(ui, opts)
1450 timer, fm = gettimer(ui, opts)
1451 dirstate = repo.dirstate
1451 dirstate = repo.dirstate
1452 b'a' in dirstate
1452 b'a' in dirstate
1453
1453
1454 def d():
1454 def d():
1455 dirstate.hasdir(b'a')
1455 dirstate.hasdir(b'a')
1456 try:
1456 try:
1457 del dirstate._map._dirs
1457 del dirstate._map._dirs
1458 except AttributeError:
1458 except AttributeError:
1459 pass
1459 pass
1460
1460
1461 timer(d)
1461 timer(d)
1462 fm.end()
1462 fm.end()
1463
1463
1464
1464
1465 @command(
1465 @command(
1466 b'perf::dirstate|perfdirstate',
1466 b'perf::dirstate|perfdirstate',
1467 [
1467 [
1468 (
1468 (
1469 b'',
1469 b'',
1470 b'iteration',
1470 b'iteration',
1471 None,
1471 None,
1472 b'benchmark a full iteration for the dirstate',
1472 b'benchmark a full iteration for the dirstate',
1473 ),
1473 ),
1474 (
1474 (
1475 b'',
1475 b'',
1476 b'contains',
1476 b'contains',
1477 None,
1477 None,
1478 b'benchmark a large amount of `nf in dirstate` calls',
1478 b'benchmark a large amount of `nf in dirstate` calls',
1479 ),
1479 ),
1480 ]
1480 ]
1481 + formatteropts,
1481 + formatteropts,
1482 )
1482 )
1483 def perfdirstate(ui, repo, **opts):
1483 def perfdirstate(ui, repo, **opts):
1484 """benchmap the time of various distate operations
1484 """benchmap the time of various distate operations
1485
1485
1486 By default benchmark the time necessary to load a dirstate from scratch.
1486 By default benchmark the time necessary to load a dirstate from scratch.
1487 The dirstate is loaded to the point were a "contains" request can be
1487 The dirstate is loaded to the point were a "contains" request can be
1488 answered.
1488 answered.
1489 """
1489 """
1490 opts = _byteskwargs(opts)
1490 opts = _byteskwargs(opts)
1491 timer, fm = gettimer(ui, opts)
1491 timer, fm = gettimer(ui, opts)
1492 b"a" in repo.dirstate
1492 b"a" in repo.dirstate
1493
1493
1494 if opts[b'iteration'] and opts[b'contains']:
1494 if opts[b'iteration'] and opts[b'contains']:
1495 msg = b'only specify one of --iteration or --contains'
1495 msg = b'only specify one of --iteration or --contains'
1496 raise error.Abort(msg)
1496 raise error.Abort(msg)
1497
1497
1498 if opts[b'iteration']:
1498 if opts[b'iteration']:
1499 setup = None
1499 setup = None
1500 dirstate = repo.dirstate
1500 dirstate = repo.dirstate
1501
1501
1502 def d():
1502 def d():
1503 for f in dirstate:
1503 for f in dirstate:
1504 pass
1504 pass
1505
1505
1506 elif opts[b'contains']:
1506 elif opts[b'contains']:
1507 setup = None
1507 setup = None
1508 dirstate = repo.dirstate
1508 dirstate = repo.dirstate
1509 allfiles = list(dirstate)
1509 allfiles = list(dirstate)
1510 # also add file path that will be "missing" from the dirstate
1510 # also add file path that will be "missing" from the dirstate
1511 allfiles.extend([f[::-1] for f in allfiles])
1511 allfiles.extend([f[::-1] for f in allfiles])
1512
1512
1513 def d():
1513 def d():
1514 for f in allfiles:
1514 for f in allfiles:
1515 f in dirstate
1515 f in dirstate
1516
1516
1517 else:
1517 else:
1518
1518
1519 def setup():
1519 def setup():
1520 repo.dirstate.invalidate()
1520 repo.dirstate.invalidate()
1521
1521
1522 def d():
1522 def d():
1523 b"a" in repo.dirstate
1523 b"a" in repo.dirstate
1524
1524
1525 timer(d, setup=setup)
1525 timer(d, setup=setup)
1526 fm.end()
1526 fm.end()
1527
1527
1528
1528
1529 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1529 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1530 def perfdirstatedirs(ui, repo, **opts):
1530 def perfdirstatedirs(ui, repo, **opts):
1531 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1531 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1532 opts = _byteskwargs(opts)
1532 opts = _byteskwargs(opts)
1533 timer, fm = gettimer(ui, opts)
1533 timer, fm = gettimer(ui, opts)
1534 repo.dirstate.hasdir(b"a")
1534 repo.dirstate.hasdir(b"a")
1535
1535
1536 def setup():
1536 def setup():
1537 try:
1537 try:
1538 del repo.dirstate._map._dirs
1538 del repo.dirstate._map._dirs
1539 except AttributeError:
1539 except AttributeError:
1540 pass
1540 pass
1541
1541
1542 def d():
1542 def d():
1543 repo.dirstate.hasdir(b"a")
1543 repo.dirstate.hasdir(b"a")
1544
1544
1545 timer(d, setup=setup)
1545 timer(d, setup=setup)
1546 fm.end()
1546 fm.end()
1547
1547
1548
1548
1549 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1549 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1550 def perfdirstatefoldmap(ui, repo, **opts):
1550 def perfdirstatefoldmap(ui, repo, **opts):
1551 """benchmap a `dirstate._map.filefoldmap.get()` request
1551 """benchmap a `dirstate._map.filefoldmap.get()` request
1552
1552
1553 The dirstate filefoldmap cache is dropped between every request.
1553 The dirstate filefoldmap cache is dropped between every request.
1554 """
1554 """
1555 opts = _byteskwargs(opts)
1555 opts = _byteskwargs(opts)
1556 timer, fm = gettimer(ui, opts)
1556 timer, fm = gettimer(ui, opts)
1557 dirstate = repo.dirstate
1557 dirstate = repo.dirstate
1558 dirstate._map.filefoldmap.get(b'a')
1558 dirstate._map.filefoldmap.get(b'a')
1559
1559
1560 def setup():
1560 def setup():
1561 del dirstate._map.filefoldmap
1561 del dirstate._map.filefoldmap
1562
1562
1563 def d():
1563 def d():
1564 dirstate._map.filefoldmap.get(b'a')
1564 dirstate._map.filefoldmap.get(b'a')
1565
1565
1566 timer(d, setup=setup)
1566 timer(d, setup=setup)
1567 fm.end()
1567 fm.end()
1568
1568
1569
1569
1570 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1570 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1571 def perfdirfoldmap(ui, repo, **opts):
1571 def perfdirfoldmap(ui, repo, **opts):
1572 """benchmap a `dirstate._map.dirfoldmap.get()` request
1572 """benchmap a `dirstate._map.dirfoldmap.get()` request
1573
1573
1574 The dirstate dirfoldmap cache is dropped between every request.
1574 The dirstate dirfoldmap cache is dropped between every request.
1575 """
1575 """
1576 opts = _byteskwargs(opts)
1576 opts = _byteskwargs(opts)
1577 timer, fm = gettimer(ui, opts)
1577 timer, fm = gettimer(ui, opts)
1578 dirstate = repo.dirstate
1578 dirstate = repo.dirstate
1579 dirstate._map.dirfoldmap.get(b'a')
1579 dirstate._map.dirfoldmap.get(b'a')
1580
1580
1581 def setup():
1581 def setup():
1582 del dirstate._map.dirfoldmap
1582 del dirstate._map.dirfoldmap
1583 try:
1583 try:
1584 del dirstate._map._dirs
1584 del dirstate._map._dirs
1585 except AttributeError:
1585 except AttributeError:
1586 pass
1586 pass
1587
1587
1588 def d():
1588 def d():
1589 dirstate._map.dirfoldmap.get(b'a')
1589 dirstate._map.dirfoldmap.get(b'a')
1590
1590
1591 timer(d, setup=setup)
1591 timer(d, setup=setup)
1592 fm.end()
1592 fm.end()
1593
1593
1594
1594
1595 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1595 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1596 def perfdirstatewrite(ui, repo, **opts):
1596 def perfdirstatewrite(ui, repo, **opts):
1597 """benchmap the time it take to write a dirstate on disk"""
1597 """benchmap the time it take to write a dirstate on disk"""
1598 opts = _byteskwargs(opts)
1598 opts = _byteskwargs(opts)
1599 timer, fm = gettimer(ui, opts)
1599 timer, fm = gettimer(ui, opts)
1600 ds = repo.dirstate
1600 ds = repo.dirstate
1601 b"a" in ds
1601 b"a" in ds
1602
1602
1603 def setup():
1603 def setup():
1604 ds._dirty = True
1604 ds._dirty = True
1605
1605
1606 def d():
1606 def d():
1607 ds.write(repo.currenttransaction())
1607 ds.write(repo.currenttransaction())
1608
1608
1609 with repo.wlock():
1609 with repo.wlock():
1610 timer(d, setup=setup)
1610 timer(d, setup=setup)
1611 fm.end()
1611 fm.end()
1612
1612
1613
1613
1614 def _getmergerevs(repo, opts):
1614 def _getmergerevs(repo, opts):
1615 """parse command argument to return rev involved in merge
1615 """parse command argument to return rev involved in merge
1616
1616
1617 input: options dictionnary with `rev`, `from` and `bse`
1617 input: options dictionnary with `rev`, `from` and `bse`
1618 output: (localctx, otherctx, basectx)
1618 output: (localctx, otherctx, basectx)
1619 """
1619 """
1620 if opts[b'from']:
1620 if opts[b'from']:
1621 fromrev = scmutil.revsingle(repo, opts[b'from'])
1621 fromrev = scmutil.revsingle(repo, opts[b'from'])
1622 wctx = repo[fromrev]
1622 wctx = repo[fromrev]
1623 else:
1623 else:
1624 wctx = repo[None]
1624 wctx = repo[None]
1625 # we don't want working dir files to be stat'd in the benchmark, so
1625 # we don't want working dir files to be stat'd in the benchmark, so
1626 # prime that cache
1626 # prime that cache
1627 wctx.dirty()
1627 wctx.dirty()
1628 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1628 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1629 if opts[b'base']:
1629 if opts[b'base']:
1630 fromrev = scmutil.revsingle(repo, opts[b'base'])
1630 fromrev = scmutil.revsingle(repo, opts[b'base'])
1631 ancestor = repo[fromrev]
1631 ancestor = repo[fromrev]
1632 else:
1632 else:
1633 ancestor = wctx.ancestor(rctx)
1633 ancestor = wctx.ancestor(rctx)
1634 return (wctx, rctx, ancestor)
1634 return (wctx, rctx, ancestor)
1635
1635
1636
1636
1637 @command(
1637 @command(
1638 b'perf::mergecalculate|perfmergecalculate',
1638 b'perf::mergecalculate|perfmergecalculate',
1639 [
1639 [
1640 (b'r', b'rev', b'.', b'rev to merge against'),
1640 (b'r', b'rev', b'.', b'rev to merge against'),
1641 (b'', b'from', b'', b'rev to merge from'),
1641 (b'', b'from', b'', b'rev to merge from'),
1642 (b'', b'base', b'', b'the revision to use as base'),
1642 (b'', b'base', b'', b'the revision to use as base'),
1643 ]
1643 ]
1644 + formatteropts,
1644 + formatteropts,
1645 )
1645 )
1646 def perfmergecalculate(ui, repo, **opts):
1646 def perfmergecalculate(ui, repo, **opts):
1647 opts = _byteskwargs(opts)
1647 opts = _byteskwargs(opts)
1648 timer, fm = gettimer(ui, opts)
1648 timer, fm = gettimer(ui, opts)
1649
1649
1650 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1650 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1651
1651
1652 def d():
1652 def d():
1653 # acceptremote is True because we don't want prompts in the middle of
1653 # acceptremote is True because we don't want prompts in the middle of
1654 # our benchmark
1654 # our benchmark
1655 merge.calculateupdates(
1655 merge.calculateupdates(
1656 repo,
1656 repo,
1657 wctx,
1657 wctx,
1658 rctx,
1658 rctx,
1659 [ancestor],
1659 [ancestor],
1660 branchmerge=False,
1660 branchmerge=False,
1661 force=False,
1661 force=False,
1662 acceptremote=True,
1662 acceptremote=True,
1663 followcopies=True,
1663 followcopies=True,
1664 )
1664 )
1665
1665
1666 timer(d)
1666 timer(d)
1667 fm.end()
1667 fm.end()
1668
1668
1669
1669
1670 @command(
1670 @command(
1671 b'perf::mergecopies|perfmergecopies',
1671 b'perf::mergecopies|perfmergecopies',
1672 [
1672 [
1673 (b'r', b'rev', b'.', b'rev to merge against'),
1673 (b'r', b'rev', b'.', b'rev to merge against'),
1674 (b'', b'from', b'', b'rev to merge from'),
1674 (b'', b'from', b'', b'rev to merge from'),
1675 (b'', b'base', b'', b'the revision to use as base'),
1675 (b'', b'base', b'', b'the revision to use as base'),
1676 ]
1676 ]
1677 + formatteropts,
1677 + formatteropts,
1678 )
1678 )
1679 def perfmergecopies(ui, repo, **opts):
1679 def perfmergecopies(ui, repo, **opts):
1680 """measure runtime of `copies.mergecopies`"""
1680 """measure runtime of `copies.mergecopies`"""
1681 opts = _byteskwargs(opts)
1681 opts = _byteskwargs(opts)
1682 timer, fm = gettimer(ui, opts)
1682 timer, fm = gettimer(ui, opts)
1683 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1683 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1684
1684
1685 def d():
1685 def d():
1686 # acceptremote is True because we don't want prompts in the middle of
1686 # acceptremote is True because we don't want prompts in the middle of
1687 # our benchmark
1687 # our benchmark
1688 copies.mergecopies(repo, wctx, rctx, ancestor)
1688 copies.mergecopies(repo, wctx, rctx, ancestor)
1689
1689
1690 timer(d)
1690 timer(d)
1691 fm.end()
1691 fm.end()
1692
1692
1693
1693
1694 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1694 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1695 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1695 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1696 """benchmark the copy tracing logic"""
1696 """benchmark the copy tracing logic"""
1697 opts = _byteskwargs(opts)
1697 opts = _byteskwargs(opts)
1698 timer, fm = gettimer(ui, opts)
1698 timer, fm = gettimer(ui, opts)
1699 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1699 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1700 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1700 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1701
1701
1702 def d():
1702 def d():
1703 copies.pathcopies(ctx1, ctx2)
1703 copies.pathcopies(ctx1, ctx2)
1704
1704
1705 timer(d)
1705 timer(d)
1706 fm.end()
1706 fm.end()
1707
1707
1708
1708
1709 @command(
1709 @command(
1710 b'perf::phases|perfphases',
1710 b'perf::phases|perfphases',
1711 [
1711 [
1712 (b'', b'full', False, b'include file reading time too'),
1712 (b'', b'full', False, b'include file reading time too'),
1713 ],
1713 ],
1714 b"",
1714 b"",
1715 )
1715 )
1716 def perfphases(ui, repo, **opts):
1716 def perfphases(ui, repo, **opts):
1717 """benchmark phasesets computation"""
1717 """benchmark phasesets computation"""
1718 opts = _byteskwargs(opts)
1718 opts = _byteskwargs(opts)
1719 timer, fm = gettimer(ui, opts)
1719 timer, fm = gettimer(ui, opts)
1720 _phases = repo._phasecache
1720 _phases = repo._phasecache
1721 full = opts.get(b'full')
1721 full = opts.get(b'full')
1722 tip_rev = repo.changelog.tiprev()
1722 tip_rev = repo.changelog.tiprev()
1723
1723
1724 def d():
1724 def d():
1725 phases = _phases
1725 phases = _phases
1726 if full:
1726 if full:
1727 clearfilecache(repo, b'_phasecache')
1727 clearfilecache(repo, b'_phasecache')
1728 phases = repo._phasecache
1728 phases = repo._phasecache
1729 phases.invalidate()
1729 phases.invalidate()
1730 phases.phase(repo, tip_rev)
1730 phases.phase(repo, tip_rev)
1731
1731
1732 timer(d)
1732 timer(d)
1733 fm.end()
1733 fm.end()
1734
1734
1735
1735
1736 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1736 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1737 def perfphasesremote(ui, repo, dest=None, **opts):
1737 def perfphasesremote(ui, repo, dest=None, **opts):
1738 """benchmark time needed to analyse phases of the remote server"""
1738 """benchmark time needed to analyse phases of the remote server"""
1739 from mercurial.node import bin
1739 from mercurial.node import bin
1740 from mercurial import (
1740 from mercurial import (
1741 exchange,
1741 exchange,
1742 hg,
1742 hg,
1743 phases,
1743 phases,
1744 )
1744 )
1745
1745
1746 opts = _byteskwargs(opts)
1746 opts = _byteskwargs(opts)
1747 timer, fm = gettimer(ui, opts)
1747 timer, fm = gettimer(ui, opts)
1748
1748
1749 path = ui.getpath(dest, default=(b'default-push', b'default'))
1749 path = ui.getpath(dest, default=(b'default-push', b'default'))
1750 if not path:
1750 if not path:
1751 raise error.Abort(
1751 raise error.Abort(
1752 b'default repository not configured!',
1752 b'default repository not configured!',
1753 hint=b"see 'hg help config.paths'",
1753 hint=b"see 'hg help config.paths'",
1754 )
1754 )
1755 if util.safehasattr(path, 'main_path'):
1755 if util.safehasattr(path, 'main_path'):
1756 path = path.get_push_variant()
1756 path = path.get_push_variant()
1757 dest = path.loc
1757 dest = path.loc
1758 else:
1758 else:
1759 dest = path.pushloc or path.loc
1759 dest = path.pushloc or path.loc
1760 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1760 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1761 other = hg.peer(repo, opts, dest)
1761 other = hg.peer(repo, opts, dest)
1762
1762
1763 # easier to perform discovery through the operation
1763 # easier to perform discovery through the operation
1764 op = exchange.pushoperation(repo, other)
1764 op = exchange.pushoperation(repo, other)
1765 exchange._pushdiscoverychangeset(op)
1765 exchange._pushdiscoverychangeset(op)
1766
1766
1767 remotesubset = op.fallbackheads
1767 remotesubset = op.fallbackheads
1768
1768
1769 with other.commandexecutor() as e:
1769 with other.commandexecutor() as e:
1770 remotephases = e.callcommand(
1770 remotephases = e.callcommand(
1771 b'listkeys', {b'namespace': b'phases'}
1771 b'listkeys', {b'namespace': b'phases'}
1772 ).result()
1772 ).result()
1773 del other
1773 del other
1774 publishing = remotephases.get(b'publishing', False)
1774 publishing = remotephases.get(b'publishing', False)
1775 if publishing:
1775 if publishing:
1776 ui.statusnoi18n(b'publishing: yes\n')
1776 ui.statusnoi18n(b'publishing: yes\n')
1777 else:
1777 else:
1778 ui.statusnoi18n(b'publishing: no\n')
1778 ui.statusnoi18n(b'publishing: no\n')
1779
1779
1780 has_node = getattr(repo.changelog.index, 'has_node', None)
1780 has_node = getattr(repo.changelog.index, 'has_node', None)
1781 if has_node is None:
1781 if has_node is None:
1782 has_node = repo.changelog.nodemap.__contains__
1782 has_node = repo.changelog.nodemap.__contains__
1783 nonpublishroots = 0
1783 nonpublishroots = 0
1784 for nhex, phase in remotephases.iteritems():
1784 for nhex, phase in remotephases.iteritems():
1785 if nhex == b'publishing': # ignore data related to publish option
1785 if nhex == b'publishing': # ignore data related to publish option
1786 continue
1786 continue
1787 node = bin(nhex)
1787 node = bin(nhex)
1788 if has_node(node) and int(phase):
1788 if has_node(node) and int(phase):
1789 nonpublishroots += 1
1789 nonpublishroots += 1
1790 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1790 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1791 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1791 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1792
1792
1793 def d():
1793 def d():
1794 phases.remotephasessummary(repo, remotesubset, remotephases)
1794 phases.remotephasessummary(repo, remotesubset, remotephases)
1795
1795
1796 timer(d)
1796 timer(d)
1797 fm.end()
1797 fm.end()
1798
1798
1799
1799
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval.

    ``rev`` is a changeset revision by default; with --manifest-rev it is
    interpreted directly as a manifest revision (full 40-char hex node or
    integer manifest revnum). With --clear-disk, persisted on-disk caches
    are dropped between runs as well.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # resolve the changeset and take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # newer hg exposes per-tree storage via getstorage();
                # fall back to the private revlog on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # clear caches every run so we always measure a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1843
1843
1844
1844
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset's full changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def read_changeset():
        repo.changelog.read(node)

    timer(read_changeset)
    fm.end()
1857
1857
1858
1858
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset():
        # drop in-memory dirstate data and the cached ignore matcher so
        # every run rebuilds the ignore machinery from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load_ignore():
        dirstate._ignore

    timer(load_ignore, setup=reset, title=b"load")
    fm.end()
1875
1875
1876
1876
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        # default: resolve tip only
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # re-create the changelog (index creation) then do the lookups
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1939
1939
1940
1940
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # newer hg exposes lookup via index.get_rev; older via nodemap.get
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild the nodemap before each timed run (cold lookups)

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
2011
2011
2012
2012
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time it takes to spawn `hg version -q`

    Spawns the same executable this process was started with (sys.argv[0]),
    with HGRCPATH neutralized so local configuration does not skew timings.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # POSIX: clear HGRCPATH for the child only, discard output
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows: no inline env syntax; mutate the environment instead
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
2029
2029
2030
2030
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    ``version`` is one of ``b'v1'``, ``b'v2'``, ``b'v3-exp'`` or
    ``b'latest'`` (the highest non-experimental version this Mercurial
    provides). Returns a callable taking a repo and yielding stream chunks.

    Raises error.Abort when the requested version is unknown or not
    available in the running Mercurial.
    """
    import mercurial.streamclone

    # mapping of version identifier -> generator callable
    available = {}

    # try to fetch a v1 generator (used directly; its signature already
    # matches the ``generate(repo)`` contract)
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:
        available[b'v1'] = generatev1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate_v2(repo):
            entries, num_bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate_v2
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate_v3(repo):
            entries, num_bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate_v3

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2078
2078
2079
2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone

    Only creating the stream generator is timed; the produced stream is
    kept (not consumed) so generator cleanup is excluded from the run.
    """

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        # drop the previous run's generator outside the timed section
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()
2113
2113
2114
2114
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to us ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone

    Unlike perf::stream-locked-section, this consumes every chunk of the
    stream, so the whole generation cost is measured.
    """

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2145
2145
2146
2146
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository

    ``filename`` is an on-disk bundle/stream file; each run applies it to a
    brand-new repository created in a temporary directory.
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    # NOTE(review): native str message where most Abort calls here use
    # bytes — confirm intended
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # mutable cell handing the open bundle file and temp dir from the
    # per-run context manager to runone()
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            # bundle2 path: apply inside a transaction on the new repo
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2224
2224
2225
2225
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount — number of commits to walk
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def fetch_parents():
        for node in nodes:
            repo.changelog.parents(node)

    timer(fetch_parents)
    fm.end()
2251
2251
2252
2252
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of one changeset via its context"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
2264
2264
2265
2265
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading one changeset's raw files list from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # changelog.read() returns a tuple; index 3 is the files list
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
2278
2278
2279
2279
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
2286
2286
2287
2287
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a deterministic sequence of random linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edit_count = opts[b'edits']
    hunk_cap = opts[b'max_hunk_lines']
    b1_cap = 100000

    # pre-compute the edit sequence with a fixed seed so only the linelog
    # work itself is measured and runs are comparable
    random.seed(0)
    randint = random.randint
    line_total = 0
    edit_args = []
    for rev in _xrange(edit_count):
        a1 = randint(0, line_total)
        a2 = randint(a1, min(line_total, a1 + hunk_cap))
        b1 = randint(0, b1_cap)
        b2 = randint(b1, b1 + hunk_cap)
        line_total += (b2 - b1) - (a2 - a1)
        edit_args.append((rev, a1, a2, b1, b2))

    def apply_edits():
        ll = linelog.linelog()
        for args in edit_args:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(apply_edits)
    fm.end()
2325
2325
2326
2326
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark parsing and resolving a set of revset specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
2334
2334
2335
2335
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in a freshly-opened changelog revlog

    The revlog caches are cleared after each lookup so every run starts
    cold.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # newer hg takes ``radix``; older versions take ``indexfile``
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        clearcaches(cl)

    timer(d)
    fm.end()
2356
2356
2357
2357
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the `log` command, optionally following renames."""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the command output so we only measure log computation
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
2375
2375
2376
2376
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        last = len(repo) - 1
        for rev in repo.changelog.revs(start=last, stop=-1):
            # reading the branch forces the changelog entry to be loaded,
            # exercising data access in addition to the index
            repo[rev].branch()

    timer(walkback)
    fm.end()
2393
2393
2394
2394
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # route all template output to the bit bucket so that only rendering
    # time is measured, not terminal I/O
    nullui.fout = open(os.devnull, 'wb')
    try:
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # the previous code leaked this handle; close it explicitly
        nullui.fout.close()
2437
2437
2438
2438
def _displaystats(ui, opts, entries, data):
    """print percentile statistics for each measured data series

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    key to a list of tuples whose first element is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # no sample was collected for this series; the indexing below
            # would raise IndexError, so just skip it
            continue
        # bug fix: percentile indices must be derived from the number of
        # collected samples, not from the number of series in `data`
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2483
2483
2484
2484
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=None, **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # avoid the mutable-default-argument pitfall; None behaves exactly like
    # the old `[]` default thanks to the `if not revs` fallback below
    if revs is None:
        revs = []
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the timing/rename columns when they are not computed
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2666
2666
2667
2667
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # avoid a mutable default argument; None is equivalent to the previous
    # `[]` default because of the `if not revs` fallback below
    if revs is None:
        revs = []
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits are relevant for copy tracing pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2806
2806
2807
2807
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark building a case collision auditor over the dirstate."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(makeauditor)
    fm.end()
2814
2814
2815
2815
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2827
2827
2828
2828
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                # mark the cache dirty so write() actually serializes it
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            # release the transaction even when timing fails; this is a
            # no-op after a successful close()
            tr.release()
    finally:
        # the previous code leaked the lock (and transaction) on error
        lock.release()
    fm.end()
2847
2847
2848
2848
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently held in the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2862
2862
2863
2863
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of the bdiff benchmark.
    #
    # ``q`` carries pairs of texts to diff; a ``None`` item marks the end of
    # the current batch. ``blocks``/``xdiff`` select which diff routine is
    # exercised. ``ready`` is a condition used to park the thread between
    # batches and ``done`` is an event telling the worker to exit for good.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            # wait until the producer wakes us up for the next batch
            # (or to observe ``done`` and exit)
            ready.wait()
2879
2879
2880
2880
def _manifestrevision(repo, mnode):
    """return the raw revision text for manifest node ``mnode``

    Supports both modern manifest logs (exposing ``getstorage``) and older
    ones where the backing revlog must be accessed directly.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2890
2890
2891
2891
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata resolves changesets, so force reading from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the first positional argument is a revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old fulltext, new fulltext) pairs diffed during the timed section
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Thread-pool mode: workers pull pairs from `q`, `ready` wakes them
        # up for a run and `done` signals shutdown — NOTE(review): the exact
        # handshake lives in _bdiffworker, confirm against it.
        q = queue()
        # prime the queue with one sentinel per worker; the q.join() below
        # presumably blocks until every worker has started and drained its
        # sentinel — TODO confirm against _bdiffworker
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs plus one sentinel per worker, wake the pool,
            # then wait for everything to be consumed
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker threads down: set the exit flag, unblock any
        # worker waiting on the queue, and wake them all up one last time
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3006
3006
3007
3007
@command(
    b'perf::unbundle',
    [
        (b'', b'as-push', None, b'pretend the bundle comes from a push'),
    ]
    + formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing.

    The --as-push option makes the unbundle operation appear as if it came
    from a client push. It changes some aspects of the processing and of the
    associated performance profile.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    # pretending to be a push changes how the bundle is processed
    unbundle_source = b'perf::unbundle'
    if opts[b'as_push']:
        unbundle_source = b'push'

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining to dodge the rollback regression described
            # above on the affected hg versions
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # mutable [bundle-generator, transaction] pair shared between
            # the setup and timed callables
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run and start from a fresh bundle + transaction
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        # the timed section: apply the bundle, but do not
                        # close the transaction
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=unbundle_source,
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: restore the quiet level (the previous code used
                # `==`, a no-op comparison, instead of an assignment, so
                # the ui stayed quiet after the benchmark)
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3087
3099
3088
3100
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata resolves changesets, so force reading from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the first positional argument is a revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (left fulltext, right fulltext) pairs diffed during the timed section
    pairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    first = rl.rev(rl.lookup(rev))
    last = min(first + count, len(rl) - 1)
    for cur in range(first, last):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[cur]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                parent_text = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((parent_text, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for fname, change in pman.diff(man).items():
                flog = repo.file(fname)
                old = flog.revision(change[0][0] or -1)
                new = flog.revision(change[1][0] or -1)
                pairs.append((old, new))
        else:
            # default: diff each revision against its delta parent
            base = rl.deltaparent(cur)
            pairs.append((rl.revision(base), rl.revision(cur)))

    def runner():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(runner)
    fm.end()
3167
3179
3168
3180
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter diff flag -> matching keyword argument of commands.diff
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flagmap[letter]: b'1' for letter in combo}

        def run():
            # buffer the diff output so only the timing is displayed
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        label = combo.encode('ascii')
        if label:
            title = b'diffopts: %s' % (b'-' + label)
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
3192
3204
3193
3205
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    # raw index bytes, parsed repeatedly by the closures below
    data = opener.read(indexfile)

    # first 4 bytes: flags in the high 16 bits, revlog version in the low 16
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8: the parser lived on the revlogio class
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed positions across the revlog for lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time building a revlog instance from scratch
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # time raw index file reading, without parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        # modern indexes expose rev(); older ones only a nodemap
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            # missing nodes are expected for the "look up missing node" bench
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        # modern indexes expose rev(); older ones only a nodemap
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    # (callable, title) pairs, each timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3339
3351
3340
3352
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    nbrevs = getlen(ui)(rl)

    if startrev < 0:
        # a negative start counts from the end of the revlog
        startrev += nbrevs

    def read_series():
        # drop caches so every run pays the full read cost
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, stop = nbrevs - 1, startrev - 1
            step = -step
        else:
            first, stop = startrev, nbrevs

        for r in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(r))

    timer, fm = gettimer(ui, opts)
    timer(read_series)
    fm.end()
3389
3401
3390
3402
3391 @command(
3403 @command(
3392 b'perf::revlogwrite|perfrevlogwrite',
3404 b'perf::revlogwrite|perfrevlogwrite',
3393 revlogopts
3405 revlogopts
3394 + formatteropts
3406 + formatteropts
3395 + [
3407 + [
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3408 (b's', b'startrev', 1000, b'revision to start writing at'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3409 (b'', b'stoprev', -1, b'last revision to write'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3410 (b'', b'count', 3, b'number of passes to perform'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3411 (b'', b'details', False, b'print timing for every revisions tested'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3412 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3413 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3414 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3403 ],
3415 ],
3404 b'-c|-m|FILE',
3416 b'-c|-m|FILE',
3405 )
3417 )
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3418 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3407 """Benchmark writing a series of revisions to a revlog.
3419 """Benchmark writing a series of revisions to a revlog.
3408
3420
3409 Possible source values are:
3421 Possible source values are:
3410 * `full`: add from a full text (default).
3422 * `full`: add from a full text (default).
3411 * `parent-1`: add from a delta to the first parent
3423 * `parent-1`: add from a delta to the first parent
3412 * `parent-2`: add from a delta to the second parent if it exists
3424 * `parent-2`: add from a delta to the second parent if it exists
3413 (use a delta from the first parent otherwise)
3425 (use a delta from the first parent otherwise)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3426 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3415 * `storage`: add from the existing precomputed deltas
3427 * `storage`: add from the existing precomputed deltas
3416
3428
3417 Note: This performance command measures performance in a custom way. As a
3429 Note: This performance command measures performance in a custom way. As a
3418 result some of the global configuration of the 'perf' command does not
3430 result some of the global configuration of the 'perf' command does not
3419 apply to it:
3431 apply to it:
3420
3432
3421 * ``pre-run``: disabled
3433 * ``pre-run``: disabled
3422
3434
3423 * ``profile-benchmark``: disabled
3435 * ``profile-benchmark``: disabled
3424
3436
3425 * ``run-limits``: disabled use --count instead
3437 * ``run-limits``: disabled use --count instead
3426 """
3438 """
3427 opts = _byteskwargs(opts)
3439 opts = _byteskwargs(opts)
3428
3440
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3441 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3430 rllen = getlen(ui)(rl)
3442 rllen = getlen(ui)(rl)
3431 if startrev < 0:
3443 if startrev < 0:
3432 startrev = rllen + startrev
3444 startrev = rllen + startrev
3433 if stoprev < 0:
3445 if stoprev < 0:
3434 stoprev = rllen + stoprev
3446 stoprev = rllen + stoprev
3435
3447
3436 lazydeltabase = opts['lazydeltabase']
3448 lazydeltabase = opts['lazydeltabase']
3437 source = opts['source']
3449 source = opts['source']
3438 clearcaches = opts['clear_caches']
3450 clearcaches = opts['clear_caches']
3439 validsource = (
3451 validsource = (
3440 b'full',
3452 b'full',
3441 b'parent-1',
3453 b'parent-1',
3442 b'parent-2',
3454 b'parent-2',
3443 b'parent-smallest',
3455 b'parent-smallest',
3444 b'storage',
3456 b'storage',
3445 )
3457 )
3446 if source not in validsource:
3458 if source not in validsource:
3447 raise error.Abort('invalid source type: %s' % source)
3459 raise error.Abort('invalid source type: %s' % source)
3448
3460
3449 ### actually gather results
3461 ### actually gather results
3450 count = opts['count']
3462 count = opts['count']
3451 if count <= 0:
3463 if count <= 0:
3452 raise error.Abort('invalide run count: %d' % count)
3464 raise error.Abort('invalide run count: %d' % count)
3453 allresults = []
3465 allresults = []
3454 for c in range(count):
3466 for c in range(count):
3455 timing = _timeonewrite(
3467 timing = _timeonewrite(
3456 ui,
3468 ui,
3457 rl,
3469 rl,
3458 source,
3470 source,
3459 startrev,
3471 startrev,
3460 stoprev,
3472 stoprev,
3461 c + 1,
3473 c + 1,
3462 lazydeltabase=lazydeltabase,
3474 lazydeltabase=lazydeltabase,
3463 clearcaches=clearcaches,
3475 clearcaches=clearcaches,
3464 )
3476 )
3465 allresults.append(timing)
3477 allresults.append(timing)
3466
3478
3467 ### consolidate the results in a single list
3479 ### consolidate the results in a single list
3468 results = []
3480 results = []
3469 for idx, (rev, t) in enumerate(allresults[0]):
3481 for idx, (rev, t) in enumerate(allresults[0]):
3470 ts = [t]
3482 ts = [t]
3471 for other in allresults[1:]:
3483 for other in allresults[1:]:
3472 orev, ot = other[idx]
3484 orev, ot = other[idx]
3473 assert orev == rev
3485 assert orev == rev
3474 ts.append(ot)
3486 ts.append(ot)
3475 results.append((rev, ts))
3487 results.append((rev, ts))
3476 resultcount = len(results)
3488 resultcount = len(results)
3477
3489
3478 ### Compute and display relevant statistics
3490 ### Compute and display relevant statistics
3479
3491
3480 # get a formatter
3492 # get a formatter
3481 fm = ui.formatter(b'perf', opts)
3493 fm = ui.formatter(b'perf', opts)
3482 displayall = ui.configbool(b"perf", b"all-timing", True)
3494 displayall = ui.configbool(b"perf", b"all-timing", True)
3483
3495
3484 # print individual details if requested
3496 # print individual details if requested
3485 if opts['details']:
3497 if opts['details']:
3486 for idx, item in enumerate(results, 1):
3498 for idx, item in enumerate(results, 1):
3487 rev, data = item
3499 rev, data = item
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3500 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3489 formatone(fm, data, title=title, displayall=displayall)
3501 formatone(fm, data, title=title, displayall=displayall)
3490
3502
3491 # sorts results by median time
3503 # sorts results by median time
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3504 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3493 # list of (name, index) to display)
3505 # list of (name, index) to display)
3494 relevants = [
3506 relevants = [
3495 ("min", 0),
3507 ("min", 0),
3496 ("10%", resultcount * 10 // 100),
3508 ("10%", resultcount * 10 // 100),
3497 ("25%", resultcount * 25 // 100),
3509 ("25%", resultcount * 25 // 100),
3498 ("50%", resultcount * 70 // 100),
3510 ("50%", resultcount * 70 // 100),
3499 ("75%", resultcount * 75 // 100),
3511 ("75%", resultcount * 75 // 100),
3500 ("90%", resultcount * 90 // 100),
3512 ("90%", resultcount * 90 // 100),
3501 ("95%", resultcount * 95 // 100),
3513 ("95%", resultcount * 95 // 100),
3502 ("99%", resultcount * 99 // 100),
3514 ("99%", resultcount * 99 // 100),
3503 ("99.9%", resultcount * 999 // 1000),
3515 ("99.9%", resultcount * 999 // 1000),
3504 ("99.99%", resultcount * 9999 // 10000),
3516 ("99.99%", resultcount * 9999 // 10000),
3505 ("99.999%", resultcount * 99999 // 100000),
3517 ("99.999%", resultcount * 99999 // 100000),
3506 ("max", -1),
3518 ("max", -1),
3507 ]
3519 ]
3508 if not ui.quiet:
3520 if not ui.quiet:
3509 for name, idx in relevants:
3521 for name, idx in relevants:
3510 data = results[idx]
3522 data = results[idx]
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3523 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3512 formatone(fm, data[1], title=title, displayall=displayall)
3524 formatone(fm, data[1], title=title, displayall=displayall)
3513
3525
3514 # XXX summing that many float will not be very precise, we ignore this fact
3526 # XXX summing that many float will not be very precise, we ignore this fact
3515 # for now
3527 # for now
3516 totaltime = []
3528 totaltime = []
3517 for item in allresults:
3529 for item in allresults:
3518 totaltime.append(
3530 totaltime.append(
3519 (
3531 (
3520 sum(x[1][0] for x in item),
3532 sum(x[1][0] for x in item),
3521 sum(x[1][1] for x in item),
3533 sum(x[1][1] for x in item),
3522 sum(x[1][2] for x in item),
3534 sum(x[1][2] for x in item),
3523 )
3535 )
3524 )
3536 )
3525 formatone(
3537 formatone(
3526 fm,
3538 fm,
3527 totaltime,
3539 totaltime,
3528 title="total time (%d revs)" % resultcount,
3540 title="total time (%d revs)" % resultcount,
3529 displayall=displayall,
3541 displayall=displayall,
3530 )
3542 )
3531 fm.end()
3543 fm.end()
3532
3544
3533
3545
3534 class _faketr:
3546 class _faketr:
3535 def add(s, x, y, z=None):
3547 def add(s, x, y, z=None):
3536 return None
3548 return None
3537
3549
3538
3550
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Time re-adding revisions ``startrev..stoprev`` of revlog ``orig``.

    The revisions are appended to a truncated temporary copy of ``orig``
    (see ``_temprevlog``), with the revision material prepared according to
    ``source`` (see ``_getrevisionseed``).  ``runidx`` only labels the
    progress topic.  Returns a list of ``(rev, timing)`` pairs, one per
    re-added revision, where ``timing`` is what ``timeone`` recorded.
    """
    timings = []
    # addrawrevision requires a transaction object; a no-op stub is enough
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # `lazy_delta_base` moved onto `delta_config` in newer revlogs;
        # fall back to the legacy private attribute otherwise.
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop both index-level and revlog-level caches so every
                # timed append starts cold
                dest.index.clearcaches()
                dest.clearcaches()
            # only the append itself is timed; seed preparation above is not
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3591
3603
3592
3604
def _getrevisionseed(orig, rev, tr, source):
    """Prepare the ``(args, kwargs)`` pair for re-adding ``rev`` of ``orig``.

    ``source`` selects what material seeds ``addrawrevision``: the full
    text (``full``), a delta against one of the parents (``parent-1``,
    ``parent-2``, ``parent-smallest``) or the delta already stored in the
    revlog (``storage``).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # with no second parent, fall back to the first one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # start from p1 and switch to p2 only when its delta is strictly
        # smaller (ties keep p1)
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p2diff) < len(diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # replay the delta exactly as currently stored
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, orig.linkrev(rev), p1, p2),
        {'node': node, 'flags': orig.flags(rev), 'cachedelta': cachedelta},
    )
3633
3645
3634
3646
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated before ``truncaterev``.

    The index and data files of ``orig`` are copied into a temporary
    directory, truncated so that revisions >= ``truncaterev`` are removed,
    and a fresh revlog is opened on the copy.  The temporary directory is
    removed on exit.  Inline revlogs are rejected (source or copy).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # forward the compression upper bound when this revlog version has it
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # same rename dance as the index file: `_datafile` is the modern name,
    # `datafile` the <= hg-5.8 spelling
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size, so the cut-off is a plain multiply
            # NOTE(review): relies on the legacy `_io.size` attribute —
            # confirm it is still present on the revlog versions in use
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern constructor: radix-based file naming
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older constructor with explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3695
3707
3696
3708
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.

    ``--engines`` is a comma-separated list of compression engines to
    benchmark (all usable engines when unset); ``--startrev`` restricts the
    benchmarked revision range.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # Resolve the segment reader across revlog API generations:
    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and can actually
        # compress revlog data
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        # Yield a file handle for revlogs predating the `reading` context
        # manager (callers then pass it as `df=`); yield None otherwise.
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # BUGFIX: the original looked up 'datafile' twice
            # (getattr(rl, 'datafile', getattr(rl, 'datafile'))), defeating
            # the modern/legacy fallback.  Mirror _temprevlog: `_datafile`
            # is the modern name, `datafile` the <= hg-5.8 spelling.
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        # one segment per revision, cold caches
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file descriptor when possible
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        # one big segment covering the whole revision range
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch with `compressor`
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3872
3884
3873
3885
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional FILE argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # Resolve the segment reader across API generations:
    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = r._getsegmentforrevs
        except AttributeError:
            segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    if getattr(r, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(r):
            with r.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(r):
            yield

    def getrawchunks(data, chain):
        # slice the per-segment byte strings in `data` back into one raw
        # chunk per revision of the (sliced) delta chain
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlogs
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            # note: this `rev` is local and shadows the enclosing one
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            with lazy_reading(r):
                segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # exhaust the generator: slicing is lazy
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # pre-4.9 location of the slicing helper
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]

    with_sparse_read = False
    if hasattr(r, 'data_config'):
        with_sparse_read = r.data_config.with_sparse_read
    elif hasattr(r, '_withsparseread'):
        with_sparse_read = r._withsparseread
    # NOTE(review): here slicing is *skipped* when sparse-read is enabled;
    # older versions of this code sliced only when sparse-read was enabled.
    # Confirm the polarity is intentional.
    if with_sparse_read:
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # pre-compute the inputs each phase benchmark will consume
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._inner._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if with_sparse_read:
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
4041
4053
4042
4054
4043 @command(
4055 @command(
4044 b'perf::revset|perfrevset',
4056 b'perf::revset|perfrevset',
4045 [
4057 [
4046 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4058 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4047 (b'', b'contexts', False, b'obtain changectx for each revision'),
4059 (b'', b'contexts', False, b'obtain changectx for each revision'),
4048 ]
4060 ]
4049 + formatteropts,
4061 + formatteropts,
4050 b"REVSET",
4062 b"REVSET",
4051 )
4063 )
4052 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4064 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4053 """benchmark the execution time of a revset
4065 """benchmark the execution time of a revset
4054
4066
4055 Use the --clean option if need to evaluate the impact of build volatile
4067 Use the --clean option if need to evaluate the impact of build volatile
4056 revisions set cache on the revset execution. Volatile cache hold filtered
4068 revisions set cache on the revset execution. Volatile cache hold filtered
4057 and obsolete related cache."""
4069 and obsolete related cache."""
4058 opts = _byteskwargs(opts)
4070 opts = _byteskwargs(opts)
4059
4071
4060 timer, fm = gettimer(ui, opts)
4072 timer, fm = gettimer(ui, opts)
4061
4073
4062 def d():
4074 def d():
4063 if clear:
4075 if clear:
4064 repo.invalidatevolatilesets()
4076 repo.invalidatevolatilesets()
4065 if contexts:
4077 if contexts:
4066 for ctx in repo.set(expr):
4078 for ctx in repo.set(expr):
4067 pass
4079 pass
4068 else:
4080 else:
4069 for r in repo.revs(expr):
4081 for r in repo.revs(expr):
4070 pass
4082 pass
4071
4083
4072 timer(d)
4084 timer(d)
4073 fm.end()
4085 fm.end()
4074
4086
4075
4087
4076 @command(
4088 @command(
4077 b'perf::volatilesets|perfvolatilesets',
4089 b'perf::volatilesets|perfvolatilesets',
4078 [
4090 [
4079 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4091 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4080 ]
4092 ]
4081 + formatteropts,
4093 + formatteropts,
4082 )
4094 )
4083 def perfvolatilesets(ui, repo, *names, **opts):
4095 def perfvolatilesets(ui, repo, *names, **opts):
4084 """benchmark the computation of various volatile set
4096 """benchmark the computation of various volatile set
4085
4097
4086 Volatile set computes element related to filtering and obsolescence."""
4098 Volatile set computes element related to filtering and obsolescence."""
4087 opts = _byteskwargs(opts)
4099 opts = _byteskwargs(opts)
4088 timer, fm = gettimer(ui, opts)
4100 timer, fm = gettimer(ui, opts)
4089 repo = repo.unfiltered()
4101 repo = repo.unfiltered()
4090
4102
4091 def getobs(name):
4103 def getobs(name):
4092 def d():
4104 def d():
4093 repo.invalidatevolatilesets()
4105 repo.invalidatevolatilesets()
4094 if opts[b'clear_obsstore']:
4106 if opts[b'clear_obsstore']:
4095 clearfilecache(repo, b'obsstore')
4107 clearfilecache(repo, b'obsstore')
4096 obsolete.getrevs(repo, name)
4108 obsolete.getrevs(repo, name)
4097
4109
4098 return d
4110 return d
4099
4111
4100 allobs = sorted(obsolete.cachefuncs)
4112 allobs = sorted(obsolete.cachefuncs)
4101 if names:
4113 if names:
4102 allobs = [n for n in allobs if n in names]
4114 allobs = [n for n in allobs if n in names]
4103
4115
4104 for name in allobs:
4116 for name in allobs:
4105 timer(getobs(name), title=name)
4117 timer(getobs(name), title=name)
4106
4118
4107 def getfiltered(name):
4119 def getfiltered(name):
4108 def d():
4120 def d():
4109 repo.invalidatevolatilesets()
4121 repo.invalidatevolatilesets()
4110 if opts[b'clear_obsstore']:
4122 if opts[b'clear_obsstore']:
4111 clearfilecache(repo, b'obsstore')
4123 clearfilecache(repo, b'obsstore')
4112 repoview.filterrevs(repo, name)
4124 repoview.filterrevs(repo, name)
4113
4125
4114 return d
4126 return d
4115
4127
4116 allfilter = sorted(repoview.filtertable)
4128 allfilter = sorted(repoview.filtertable)
4117 if names:
4129 if names:
4118 allfilter = [n for n in allfilter if n in names]
4130 allfilter = [n for n in allfilter if n in names]
4119
4131
4120 for name in allfilter:
4132 for name in allfilter:
4121 timer(getfiltered(name), title=name)
4133 timer(getfiltered(name), title=name)
4122 fm.end()
4134 fm.end()
4123
4135
4124
4136
4125 @command(
4137 @command(
4126 b'perf::branchmap|perfbranchmap',
4138 b'perf::branchmap|perfbranchmap',
4127 [
4139 [
4128 (b'f', b'full', False, b'Includes build time of subset'),
4140 (b'f', b'full', False, b'Includes build time of subset'),
4129 (
4141 (
4130 b'',
4142 b'',
4131 b'clear-revbranch',
4143 b'clear-revbranch',
4132 False,
4144 False,
4133 b'purge the revbranch cache between computation',
4145 b'purge the revbranch cache between computation',
4134 ),
4146 ),
4135 ]
4147 ]
4136 + formatteropts,
4148 + formatteropts,
4137 )
4149 )
4138 def perfbranchmap(ui, repo, *filternames, **opts):
4150 def perfbranchmap(ui, repo, *filternames, **opts):
4139 """benchmark the update of a branchmap
4151 """benchmark the update of a branchmap
4140
4152
4141 This benchmarks the full repo.branchmap() call with read and write disabled
4153 This benchmarks the full repo.branchmap() call with read and write disabled
4142 """
4154 """
4143 opts = _byteskwargs(opts)
4155 opts = _byteskwargs(opts)
4144 full = opts.get(b"full", False)
4156 full = opts.get(b"full", False)
4145 clear_revbranch = opts.get(b"clear_revbranch", False)
4157 clear_revbranch = opts.get(b"clear_revbranch", False)
4146 timer, fm = gettimer(ui, opts)
4158 timer, fm = gettimer(ui, opts)
4147
4159
4148 def getbranchmap(filtername):
4160 def getbranchmap(filtername):
4149 """generate a benchmark function for the filtername"""
4161 """generate a benchmark function for the filtername"""
4150 if filtername is None:
4162 if filtername is None:
4151 view = repo
4163 view = repo
4152 else:
4164 else:
4153 view = repo.filtered(filtername)
4165 view = repo.filtered(filtername)
4154 if util.safehasattr(view._branchcaches, '_per_filter'):
4166 if util.safehasattr(view._branchcaches, '_per_filter'):
4155 filtered = view._branchcaches._per_filter
4167 filtered = view._branchcaches._per_filter
4156 else:
4168 else:
4157 # older versions
4169 # older versions
4158 filtered = view._branchcaches
4170 filtered = view._branchcaches
4159
4171
4160 def d():
4172 def d():
4161 if clear_revbranch:
4173 if clear_revbranch:
4162 repo.revbranchcache()._clear()
4174 repo.revbranchcache()._clear()
4163 if full:
4175 if full:
4164 view._branchcaches.clear()
4176 view._branchcaches.clear()
4165 else:
4177 else:
4166 filtered.pop(filtername, None)
4178 filtered.pop(filtername, None)
4167 view.branchmap()
4179 view.branchmap()
4168
4180
4169 return d
4181 return d
4170
4182
4171 # add filter in smaller subset to bigger subset
4183 # add filter in smaller subset to bigger subset
4172 possiblefilters = set(repoview.filtertable)
4184 possiblefilters = set(repoview.filtertable)
4173 if filternames:
4185 if filternames:
4174 possiblefilters &= set(filternames)
4186 possiblefilters &= set(filternames)
4175 subsettable = getbranchmapsubsettable()
4187 subsettable = getbranchmapsubsettable()
4176 allfilters = []
4188 allfilters = []
4177 while possiblefilters:
4189 while possiblefilters:
4178 for name in possiblefilters:
4190 for name in possiblefilters:
4179 subset = subsettable.get(name)
4191 subset = subsettable.get(name)
4180 if subset not in possiblefilters:
4192 if subset not in possiblefilters:
4181 break
4193 break
4182 else:
4194 else:
4183 assert False, b'subset cycle %s!' % possiblefilters
4195 assert False, b'subset cycle %s!' % possiblefilters
4184 allfilters.append(name)
4196 allfilters.append(name)
4185 possiblefilters.remove(name)
4197 possiblefilters.remove(name)
4186
4198
4187 # warm the cache
4199 # warm the cache
4188 if not full:
4200 if not full:
4189 for name in allfilters:
4201 for name in allfilters:
4190 repo.filtered(name).branchmap()
4202 repo.filtered(name).branchmap()
4191 if not filternames or b'unfiltered' in filternames:
4203 if not filternames or b'unfiltered' in filternames:
4192 # add unfiltered
4204 # add unfiltered
4193 allfilters.append(None)
4205 allfilters.append(None)
4194
4206
4195 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4207 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4196 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4208 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4197 branchcacheread.set(classmethod(lambda *args: None))
4209 branchcacheread.set(classmethod(lambda *args: None))
4198 else:
4210 else:
4199 # older versions
4211 # older versions
4200 branchcacheread = safeattrsetter(branchmap, b'read')
4212 branchcacheread = safeattrsetter(branchmap, b'read')
4201 branchcacheread.set(lambda *args: None)
4213 branchcacheread.set(lambda *args: None)
4202 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4214 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4203 branchcachewrite.set(lambda *args: None)
4215 branchcachewrite.set(lambda *args: None)
4204 try:
4216 try:
4205 for name in allfilters:
4217 for name in allfilters:
4206 printname = name
4218 printname = name
4207 if name is None:
4219 if name is None:
4208 printname = b'unfiltered'
4220 printname = b'unfiltered'
4209 timer(getbranchmap(name), title=printname)
4221 timer(getbranchmap(name), title=printname)
4210 finally:
4222 finally:
4211 branchcacheread.restore()
4223 branchcacheread.restore()
4212 branchcachewrite.restore()
4224 branchcachewrite.restore()
4213 fm.end()
4225 fm.end()
4214
4226
4215
4227
4216 @command(
4228 @command(
4217 b'perf::branchmapupdate|perfbranchmapupdate',
4229 b'perf::branchmapupdate|perfbranchmapupdate',
4218 [
4230 [
4219 (b'', b'base', [], b'subset of revision to start from'),
4231 (b'', b'base', [], b'subset of revision to start from'),
4220 (b'', b'target', [], b'subset of revision to end with'),
4232 (b'', b'target', [], b'subset of revision to end with'),
4221 (b'', b'clear-caches', False, b'clear cache between each runs'),
4233 (b'', b'clear-caches', False, b'clear cache between each runs'),
4222 ]
4234 ]
4223 + formatteropts,
4235 + formatteropts,
4224 )
4236 )
4225 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4237 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4226 """benchmark branchmap update from for <base> revs to <target> revs
4238 """benchmark branchmap update from for <base> revs to <target> revs
4227
4239
4228 If `--clear-caches` is passed, the following items will be reset before
4240 If `--clear-caches` is passed, the following items will be reset before
4229 each update:
4241 each update:
4230 * the changelog instance and associated indexes
4242 * the changelog instance and associated indexes
4231 * the rev-branch-cache instance
4243 * the rev-branch-cache instance
4232
4244
4233 Examples:
4245 Examples:
4234
4246
4235 # update for the one last revision
4247 # update for the one last revision
4236 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4248 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4237
4249
4238 $ update for change coming with a new branch
4250 $ update for change coming with a new branch
4239 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4251 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4240 """
4252 """
4241 from mercurial import branchmap
4253 from mercurial import branchmap
4242 from mercurial import repoview
4254 from mercurial import repoview
4243
4255
4244 opts = _byteskwargs(opts)
4256 opts = _byteskwargs(opts)
4245 timer, fm = gettimer(ui, opts)
4257 timer, fm = gettimer(ui, opts)
4246 clearcaches = opts[b'clear_caches']
4258 clearcaches = opts[b'clear_caches']
4247 unfi = repo.unfiltered()
4259 unfi = repo.unfiltered()
4248 x = [None] # used to pass data between closure
4260 x = [None] # used to pass data between closure
4249
4261
4250 # we use a `list` here to avoid possible side effect from smartset
4262 # we use a `list` here to avoid possible side effect from smartset
4251 baserevs = list(scmutil.revrange(repo, base))
4263 baserevs = list(scmutil.revrange(repo, base))
4252 targetrevs = list(scmutil.revrange(repo, target))
4264 targetrevs = list(scmutil.revrange(repo, target))
4253 if not baserevs:
4265 if not baserevs:
4254 raise error.Abort(b'no revisions selected for --base')
4266 raise error.Abort(b'no revisions selected for --base')
4255 if not targetrevs:
4267 if not targetrevs:
4256 raise error.Abort(b'no revisions selected for --target')
4268 raise error.Abort(b'no revisions selected for --target')
4257
4269
4258 # make sure the target branchmap also contains the one in the base
4270 # make sure the target branchmap also contains the one in the base
4259 targetrevs = list(set(baserevs) | set(targetrevs))
4271 targetrevs = list(set(baserevs) | set(targetrevs))
4260 targetrevs.sort()
4272 targetrevs.sort()
4261
4273
4262 cl = repo.changelog
4274 cl = repo.changelog
4263 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4275 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4264 allbaserevs.sort()
4276 allbaserevs.sort()
4265 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4277 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4266
4278
4267 newrevs = list(alltargetrevs.difference(allbaserevs))
4279 newrevs = list(alltargetrevs.difference(allbaserevs))
4268 newrevs.sort()
4280 newrevs.sort()
4269
4281
4270 allrevs = frozenset(unfi.changelog.revs())
4282 allrevs = frozenset(unfi.changelog.revs())
4271 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4283 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4272 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4284 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4273
4285
4274 def basefilter(repo, visibilityexceptions=None):
4286 def basefilter(repo, visibilityexceptions=None):
4275 return basefilterrevs
4287 return basefilterrevs
4276
4288
4277 def targetfilter(repo, visibilityexceptions=None):
4289 def targetfilter(repo, visibilityexceptions=None):
4278 return targetfilterrevs
4290 return targetfilterrevs
4279
4291
4280 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4292 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4281 ui.status(msg % (len(allbaserevs), len(newrevs)))
4293 ui.status(msg % (len(allbaserevs), len(newrevs)))
4282 if targetfilterrevs:
4294 if targetfilterrevs:
4283 msg = b'(%d revisions still filtered)\n'
4295 msg = b'(%d revisions still filtered)\n'
4284 ui.status(msg % len(targetfilterrevs))
4296 ui.status(msg % len(targetfilterrevs))
4285
4297
4286 try:
4298 try:
4287 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4299 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4288 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4300 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4289
4301
4290 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4302 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4291 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4303 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4292
4304
4293 # try to find an existing branchmap to reuse
4305 # try to find an existing branchmap to reuse
4294 subsettable = getbranchmapsubsettable()
4306 subsettable = getbranchmapsubsettable()
4295 candidatefilter = subsettable.get(None)
4307 candidatefilter = subsettable.get(None)
4296 while candidatefilter is not None:
4308 while candidatefilter is not None:
4297 candidatebm = repo.filtered(candidatefilter).branchmap()
4309 candidatebm = repo.filtered(candidatefilter).branchmap()
4298 if candidatebm.validfor(baserepo):
4310 if candidatebm.validfor(baserepo):
4299 filtered = repoview.filterrevs(repo, candidatefilter)
4311 filtered = repoview.filterrevs(repo, candidatefilter)
4300 missing = [r for r in allbaserevs if r in filtered]
4312 missing = [r for r in allbaserevs if r in filtered]
4301 base = candidatebm.copy()
4313 base = candidatebm.copy()
4302 base.update(baserepo, missing)
4314 base.update(baserepo, missing)
4303 break
4315 break
4304 candidatefilter = subsettable.get(candidatefilter)
4316 candidatefilter = subsettable.get(candidatefilter)
4305 else:
4317 else:
4306 # no suitable subset where found
4318 # no suitable subset where found
4307 base = branchmap.branchcache()
4319 base = branchmap.branchcache()
4308 base.update(baserepo, allbaserevs)
4320 base.update(baserepo, allbaserevs)
4309
4321
4310 def setup():
4322 def setup():
4311 x[0] = base.copy()
4323 x[0] = base.copy()
4312 if clearcaches:
4324 if clearcaches:
4313 unfi._revbranchcache = None
4325 unfi._revbranchcache = None
4314 clearchangelog(repo)
4326 clearchangelog(repo)
4315
4327
4316 def bench():
4328 def bench():
4317 x[0].update(targetrepo, newrevs)
4329 x[0].update(targetrepo, newrevs)
4318
4330
4319 timer(bench, setup=setup)
4331 timer(bench, setup=setup)
4320 fm.end()
4332 fm.end()
4321 finally:
4333 finally:
4322 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4334 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4323 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4335 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4324
4336
4325
4337
4326 @command(
4338 @command(
4327 b'perf::branchmapload|perfbranchmapload',
4339 b'perf::branchmapload|perfbranchmapload',
4328 [
4340 [
4329 (b'f', b'filter', b'', b'Specify repoview filter'),
4341 (b'f', b'filter', b'', b'Specify repoview filter'),
4330 (b'', b'list', False, b'List brachmap filter caches'),
4342 (b'', b'list', False, b'List brachmap filter caches'),
4331 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4343 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4332 ]
4344 ]
4333 + formatteropts,
4345 + formatteropts,
4334 )
4346 )
4335 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4347 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4336 """benchmark reading the branchmap"""
4348 """benchmark reading the branchmap"""
4337 opts = _byteskwargs(opts)
4349 opts = _byteskwargs(opts)
4338 clearrevlogs = opts[b'clear_revlogs']
4350 clearrevlogs = opts[b'clear_revlogs']
4339
4351
4340 if list:
4352 if list:
4341 for name, kind, st in repo.cachevfs.readdir(stat=True):
4353 for name, kind, st in repo.cachevfs.readdir(stat=True):
4342 if name.startswith(b'branch2'):
4354 if name.startswith(b'branch2'):
4343 filtername = name.partition(b'-')[2] or b'unfiltered'
4355 filtername = name.partition(b'-')[2] or b'unfiltered'
4344 ui.status(
4356 ui.status(
4345 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4357 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4346 )
4358 )
4347 return
4359 return
4348 if not filter:
4360 if not filter:
4349 filter = None
4361 filter = None
4350 subsettable = getbranchmapsubsettable()
4362 subsettable = getbranchmapsubsettable()
4351 if filter is None:
4363 if filter is None:
4352 repo = repo.unfiltered()
4364 repo = repo.unfiltered()
4353 else:
4365 else:
4354 repo = repoview.repoview(repo, filter)
4366 repo = repoview.repoview(repo, filter)
4355
4367
4356 repo.branchmap() # make sure we have a relevant, up to date branchmap
4368 repo.branchmap() # make sure we have a relevant, up to date branchmap
4357
4369
4358 try:
4370 try:
4359 fromfile = branchmap.branchcache.fromfile
4371 fromfile = branchmap.branchcache.fromfile
4360 except AttributeError:
4372 except AttributeError:
4361 # older versions
4373 # older versions
4362 fromfile = branchmap.read
4374 fromfile = branchmap.read
4363
4375
4364 currentfilter = filter
4376 currentfilter = filter
4365 # try once without timer, the filter may not be cached
4377 # try once without timer, the filter may not be cached
4366 while fromfile(repo) is None:
4378 while fromfile(repo) is None:
4367 currentfilter = subsettable.get(currentfilter)
4379 currentfilter = subsettable.get(currentfilter)
4368 if currentfilter is None:
4380 if currentfilter is None:
4369 raise error.Abort(
4381 raise error.Abort(
4370 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4382 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4371 )
4383 )
4372 repo = repo.filtered(currentfilter)
4384 repo = repo.filtered(currentfilter)
4373 timer, fm = gettimer(ui, opts)
4385 timer, fm = gettimer(ui, opts)
4374
4386
4375 def setup():
4387 def setup():
4376 if clearrevlogs:
4388 if clearrevlogs:
4377 clearchangelog(repo)
4389 clearchangelog(repo)
4378
4390
4379 def bench():
4391 def bench():
4380 fromfile(repo)
4392 fromfile(repo)
4381
4393
4382 timer(bench, setup=setup)
4394 timer(bench, setup=setup)
4383 fm.end()
4395 fm.end()
4384
4396
4385
4397
4386 @command(b'perf::loadmarkers|perfloadmarkers')
4398 @command(b'perf::loadmarkers|perfloadmarkers')
4387 def perfloadmarkers(ui, repo):
4399 def perfloadmarkers(ui, repo):
4388 """benchmark the time to parse the on-disk markers for a repo
4400 """benchmark the time to parse the on-disk markers for a repo
4389
4401
4390 Result is the number of markers in the repo."""
4402 Result is the number of markers in the repo."""
4391 timer, fm = gettimer(ui)
4403 timer, fm = gettimer(ui)
4392 svfs = getsvfs(repo)
4404 svfs = getsvfs(repo)
4393 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4405 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4394 fm.end()
4406 fm.end()
4395
4407
4396
4408
4397 @command(
4409 @command(
4398 b'perf::lrucachedict|perflrucachedict',
4410 b'perf::lrucachedict|perflrucachedict',
4399 formatteropts
4411 formatteropts
4400 + [
4412 + [
4401 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4413 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4402 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4414 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4403 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4415 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4404 (b'', b'size', 4, b'size of cache'),
4416 (b'', b'size', 4, b'size of cache'),
4405 (b'', b'gets', 10000, b'number of key lookups'),
4417 (b'', b'gets', 10000, b'number of key lookups'),
4406 (b'', b'sets', 10000, b'number of key sets'),
4418 (b'', b'sets', 10000, b'number of key sets'),
4407 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4419 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4408 (
4420 (
4409 b'',
4421 b'',
4410 b'mixedgetfreq',
4422 b'mixedgetfreq',
4411 50,
4423 50,
4412 b'frequency of get vs set ops in mixed mode',
4424 b'frequency of get vs set ops in mixed mode',
4413 ),
4425 ),
4414 ],
4426 ],
4415 norepo=True,
4427 norepo=True,
4416 )
4428 )
4417 def perflrucache(
4429 def perflrucache(
4418 ui,
4430 ui,
4419 mincost=0,
4431 mincost=0,
4420 maxcost=100,
4432 maxcost=100,
4421 costlimit=0,
4433 costlimit=0,
4422 size=4,
4434 size=4,
4423 gets=10000,
4435 gets=10000,
4424 sets=10000,
4436 sets=10000,
4425 mixed=10000,
4437 mixed=10000,
4426 mixedgetfreq=50,
4438 mixedgetfreq=50,
4427 **opts
4439 **opts
4428 ):
4440 ):
4429 opts = _byteskwargs(opts)
4441 opts = _byteskwargs(opts)
4430
4442
4431 def doinit():
4443 def doinit():
4432 for i in _xrange(10000):
4444 for i in _xrange(10000):
4433 util.lrucachedict(size)
4445 util.lrucachedict(size)
4434
4446
4435 costrange = list(range(mincost, maxcost + 1))
4447 costrange = list(range(mincost, maxcost + 1))
4436
4448
4437 values = []
4449 values = []
4438 for i in _xrange(size):
4450 for i in _xrange(size):
4439 values.append(random.randint(0, _maxint))
4451 values.append(random.randint(0, _maxint))
4440
4452
4441 # Get mode fills the cache and tests raw lookup performance with no
4453 # Get mode fills the cache and tests raw lookup performance with no
4442 # eviction.
4454 # eviction.
4443 getseq = []
4455 getseq = []
4444 for i in _xrange(gets):
4456 for i in _xrange(gets):
4445 getseq.append(random.choice(values))
4457 getseq.append(random.choice(values))
4446
4458
4447 def dogets():
4459 def dogets():
4448 d = util.lrucachedict(size)
4460 d = util.lrucachedict(size)
4449 for v in values:
4461 for v in values:
4450 d[v] = v
4462 d[v] = v
4451 for key in getseq:
4463 for key in getseq:
4452 value = d[key]
4464 value = d[key]
4453 value # silence pyflakes warning
4465 value # silence pyflakes warning
4454
4466
4455 def dogetscost():
4467 def dogetscost():
4456 d = util.lrucachedict(size, maxcost=costlimit)
4468 d = util.lrucachedict(size, maxcost=costlimit)
4457 for i, v in enumerate(values):
4469 for i, v in enumerate(values):
4458 d.insert(v, v, cost=costs[i])
4470 d.insert(v, v, cost=costs[i])
4459 for key in getseq:
4471 for key in getseq:
4460 try:
4472 try:
4461 value = d[key]
4473 value = d[key]
4462 value # silence pyflakes warning
4474 value # silence pyflakes warning
4463 except KeyError:
4475 except KeyError:
4464 pass
4476 pass
4465
4477
4466 # Set mode tests insertion speed with cache eviction.
4478 # Set mode tests insertion speed with cache eviction.
4467 setseq = []
4479 setseq = []
4468 costs = []
4480 costs = []
4469 for i in _xrange(sets):
4481 for i in _xrange(sets):
4470 setseq.append(random.randint(0, _maxint))
4482 setseq.append(random.randint(0, _maxint))
4471 costs.append(random.choice(costrange))
4483 costs.append(random.choice(costrange))
4472
4484
4473 def doinserts():
4485 def doinserts():
4474 d = util.lrucachedict(size)
4486 d = util.lrucachedict(size)
4475 for v in setseq:
4487 for v in setseq:
4476 d.insert(v, v)
4488 d.insert(v, v)
4477
4489
4478 def doinsertscost():
4490 def doinsertscost():
4479 d = util.lrucachedict(size, maxcost=costlimit)
4491 d = util.lrucachedict(size, maxcost=costlimit)
4480 for i, v in enumerate(setseq):
4492 for i, v in enumerate(setseq):
4481 d.insert(v, v, cost=costs[i])
4493 d.insert(v, v, cost=costs[i])
4482
4494
4483 def dosets():
4495 def dosets():
4484 d = util.lrucachedict(size)
4496 d = util.lrucachedict(size)
4485 for v in setseq:
4497 for v in setseq:
4486 d[v] = v
4498 d[v] = v
4487
4499
4488 # Mixed mode randomly performs gets and sets with eviction.
4500 # Mixed mode randomly performs gets and sets with eviction.
4489 mixedops = []
4501 mixedops = []
4490 for i in _xrange(mixed):
4502 for i in _xrange(mixed):
4491 r = random.randint(0, 100)
4503 r = random.randint(0, 100)
4492 if r < mixedgetfreq:
4504 if r < mixedgetfreq:
4493 op = 0
4505 op = 0
4494 else:
4506 else:
4495 op = 1
4507 op = 1
4496
4508
4497 mixedops.append(
4509 mixedops.append(
4498 (op, random.randint(0, size * 2), random.choice(costrange))
4510 (op, random.randint(0, size * 2), random.choice(costrange))
4499 )
4511 )
4500
4512
4501 def domixed():
4513 def domixed():
4502 d = util.lrucachedict(size)
4514 d = util.lrucachedict(size)
4503
4515
4504 for op, v, cost in mixedops:
4516 for op, v, cost in mixedops:
4505 if op == 0:
4517 if op == 0:
4506 try:
4518 try:
4507 d[v]
4519 d[v]
4508 except KeyError:
4520 except KeyError:
4509 pass
4521 pass
4510 else:
4522 else:
4511 d[v] = v
4523 d[v] = v
4512
4524
4513 def domixedcost():
4525 def domixedcost():
4514 d = util.lrucachedict(size, maxcost=costlimit)
4526 d = util.lrucachedict(size, maxcost=costlimit)
4515
4527
4516 for op, v, cost in mixedops:
4528 for op, v, cost in mixedops:
4517 if op == 0:
4529 if op == 0:
4518 try:
4530 try:
4519 d[v]
4531 d[v]
4520 except KeyError:
4532 except KeyError:
4521 pass
4533 pass
4522 else:
4534 else:
4523 d.insert(v, v, cost=cost)
4535 d.insert(v, v, cost=cost)
4524
4536
4525 benches = [
4537 benches = [
4526 (doinit, b'init'),
4538 (doinit, b'init'),
4527 ]
4539 ]
4528
4540
4529 if costlimit:
4541 if costlimit:
4530 benches.extend(
4542 benches.extend(
4531 [
4543 [
4532 (dogetscost, b'gets w/ cost limit'),
4544 (dogetscost, b'gets w/ cost limit'),
4533 (doinsertscost, b'inserts w/ cost limit'),
4545 (doinsertscost, b'inserts w/ cost limit'),
4534 (domixedcost, b'mixed w/ cost limit'),
4546 (domixedcost, b'mixed w/ cost limit'),
4535 ]
4547 ]
4536 )
4548 )
4537 else:
4549 else:
4538 benches.extend(
4550 benches.extend(
4539 [
4551 [
4540 (dogets, b'gets'),
4552 (dogets, b'gets'),
4541 (doinserts, b'inserts'),
4553 (doinserts, b'inserts'),
4542 (dosets, b'sets'),
4554 (dosets, b'sets'),
4543 (domixed, b'mixed'),
4555 (domixed, b'mixed'),
4544 ]
4556 ]
4545 )
4557 )
4546
4558
4547 for fn, title in benches:
4559 for fn, title in benches:
4548 timer, fm = gettimer(ui, opts)
4560 timer, fm = gettimer(ui, opts)
4549 timer(fn, title=title)
4561 timer(fn, title=title)
4550 fm.end()
4562 fm.end()
4551
4563
4552
4564
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # Resolve the ui method being benchmarked (e.g. write) by name.
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # Build the whole line up front so the timed loop measures only
        # the single write call, not the string construction.
        full_line = item * nitems + b'\n'

    def benchmark():
        for _line_no in pycompat.xrange(nlines):
            if batch_line:
                write(full_line)
            else:
                # One write call per item, then one for the newline.
                for _item_no in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        # Always flush once at the end so buffered output is accounted for.
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4594
4606
4595
4607
def uisetup(ui):
    """extension setup hook: patch old Mercurial versions for portability.

    On Mercurial versions that have ``cmdutil.openrevlog`` but not
    ``commands.debugrevlogopts`` (see version-range note below), wrap
    ``openrevlog`` so that the unsupported ``--dir`` option aborts with a
    clear message instead of failing obscurely.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # Abort only when --dir was requested and the repo lacks
            # dirlog support; otherwise defer to the original function.
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        # wrapfunction needs a native-str attribute name.
        name = _sysstr(b'openrevlog')
        extensions.wrapfunction(cmdutil, name, openrevlog)
4615
4627
4616
4628
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive a progress bar from 0 to `total`, one increment per
        # step, so the per-update overhead dominates the measurement.
        progress = ui.makeprogress(topic, total=total)
        with progress:
            step = 0
            while step < total:
                progress.increment()
                step += 1

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now