##// END OF EJS Templates
phases: use a more generic way to trigger a phases computation for perf...
marmoute -
r52306:8fc92193 default
parent child Browse files
Show More
@@ -1,4637 +1,4638 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged.

    Used as a no-op fallback for pycompat helpers (fsencode,
    byteskwargs, ...) on Mercurial versions that lack them.
    """
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel; cannot collide with a real attribute


def safehasattr(thing, attr):
    """Return True when *thing* has attribute *attr* (given as bytes)."""
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing


setattr(util, 'safehasattr', safehasattr)
197
197
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # modern interpreters provide a high-resolution monotonic clock
    util.timer = time.perf_counter
elif os.name == b'nt':
    # legacy Windows fallback
    util.timer = time.clock
else:
    # legacy POSIX fallback
    util.timer = time.time
207
207
# for "historical portability":
# commands.formatteropts has been available since 3.2 (or 7a7eed5176a4),
# even though formatting itself has been available since 2.2 (or
# ae5f92e154d3); fall back to an empty option list when it is absent.
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# commands.debugrevlogopts has been available since 3.7 (or 5606f7d0d063),
# even though cmdutil.openrevlog() has been available since 1.9 (or
# a79fea6b3e77); fall back to a locally defined option list otherwise.
_default_revlogopts = [
    (b'c', b'changelog', False, b'open changelog'),
    (b'm', b'manifest', False, b'open manifest'),
    (b'', b'dir', False, b'open directory manifest'),
]
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(commands, "debugrevlogopts", _default_revlogopts),
)

cmdtable = {}
237
237
238
238
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its name/alias list."""
    return cmd.split(b"|")
244
244
245
245
if safehasattr(registrar, 'command'):
    # modern path: registrar.command (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)

else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                entry = func, list(options), synopsis
            else:
                entry = func, list(options)
            cmdtable[name] = entry
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
277
278
278
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)

    _dyndefault = mercurial.configitems.dynamicdefault
    # every perf.* knob we register, as (name, experimental-flag) pairs;
    # registration order matches the historical hand-written sequence
    _perf_config_items = [
        (b'presleep', True),
        (b'stub', True),
        (b'parentscount', True),
        (b'all-timing', True),
        (b'pre-run', False),
        (b'profile-benchmark', False),
        (b'run-limits', True),
    ]
    for _name, _experimental in _perf_config_items:
        if _experimental:
            configitem(b'perf', _name, default=_dyndefault, experimental=True)
        else:
            configitem(b'perf', _name, default=_dyndefault)
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # re-register every item without the "experimental" keyword
    for _name, _experimental in _perf_config_items:
        configitem(b'perf', _name, default=_dyndefault)
365
365
366
366
def getlen(ui):
    """Return the function used to measure collection sizes.

    When the experimental b"perf.stub" config is set (test mode), every
    collection reports a length of 1 so benchmarks stay trivial.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
371
371
372
372
class noop:
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # returning None (falsy) means exceptions are not suppressed
        return None


# shared do-nothing context, reused to avoid repeated allocation
NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                self.hexfunc = node.hex if ui.debugflag else node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<mincount>'; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limits = []
    for spec in ui.configlist(b"perf", b"run-limits", []):
        pieces = spec.split(b'-', 1)
        if len(pieces) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % spec))
            continue
        try:
            time_limit = float(_sysstr(pieces[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), spec)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(pieces[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), spec)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None and ui.configbool(
        b"perf", b"profile-benchmark", False
    ):
        profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*); no timing is done.

    Stand-in for _timer when the b"perf.stub" config is set; *fm* and
    *title* are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) tuple on exit.

    Wall time comes from util.timer; user/sys deltas come from os.times.
    """
    measurement = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurement
    wall_after = util.timer()
    os_after = os.times()
    measurement.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
526
526
527
527
# Default stop conditions for benchmark loops, checked in order: once a
# benchmark has run for at least <elapsed seconds> AND completed at least
# <minimal run count> iterations for one entry, the loop stops.
DEFAULTLIMITS = ((3.0, 100), (10.0, 3))
533
533
534
534
@contextlib.contextmanager
def noop_context():
    """Context manager performing no setup and no teardown."""
    yield
538
538
539
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* and report the timings through formatter *fm*.

    *setup* (if given) runs before every iteration; *context* wraps each
    call.  *prerun* warm-up iterations are executed unmeasured.  The loop
    stops once one of the (elapsed, mincount) pairs in *limits* is
    satisfied.  Only the first measured iteration runs under *profiler*.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # unmeasured warm-up runs
    for _ in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    running = True
    while running:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as sample:
                    r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(sample[0])
        # Look for a stop condition.
        elapsed = util.timer() - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                running = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
582
582
583
583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  Only the best run is reported unless *displayall* is set,
    in which case max/avg/median rows are added.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # the "best" row keeps bare field names; other roles are prefixed
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple([sum(x) / count for x in zip(*timings)]))
        display(b'median', timings[len(timings) // 2])
616
616
617
617
618 # utilities for historical portability
618 # utilities for historical portability
619
619
620
620
def getint(ui, section, name, default):
    """Read config ``section.name`` as an integer, or ``default`` if unset.

    Raises error.ConfigError when the value is present but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
671
671
672
672
673 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
674
674
675
675
def getbranchmapsubsettable():
    """Return the `subsettable` mapping from whichever module defines it.

    Aborts when no module provides it (very old Mercurial).
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # older Mercurial exposed the store opener as `sopener`
        return getattr(repo, 'sopener')
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # older Mercurial exposed the working vfs as `opener`
        return getattr(repo, 'opener')
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
748 # utilities to clear cache
748 # utilities to clear cache
749
749
750
750
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property from ``obj`` so it is recomputed.

    Works on the unfiltered repo when ``obj`` is a repoview.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    """Invalidate the cached changelog of ``repo`` (and its repoview)."""
    if repo is not repo.unfiltered():
        # repoview caches the changelog separately; reset its key + cache
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
765
765
766
766
767 # perf commands
767 # perf commands
768
768
769
769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                 False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer API requires the status context manager
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # the addremove signature grew a `uipathfn` argument; adapt to both
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    """Clear the in-memory caches of a changelog/revlog ``cl``."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # reset caches so each run measures a cold computation
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
883
883
884
884
def _default_clear_on_disk_tags_cache(repo):
    """Remove the on-disk tags cache file (fallback implementation)."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
889
889
890
890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Remove the on-disk tags fnodes cache file (fallback implementation)."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
895
895
896
896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # all-ones sentinels mark a record as "unknown" in the fnodes cache
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()
908
908
909
909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer in-tree helpers when the installed Mercurial provides them,
    # fall back to the local default implementations otherwise
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        # build a repoview that hides the last N revisions so we can warm
        # a "pre-update" tags cache to copy back before each run
        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # setup: restore the requested cache state before each timed run
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1028
1028
1029
1029
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
1042
1042
1043
1043
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET against the ancestors lazy set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()
1058
1058
1059
1059
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional arg means "REV" (with -c/-m), two mean "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1123
1123
1124
1124
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve the path through whichever urlutil API this Mercurial has
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # reconnect to the peer before each run so discovery starts cold
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1151
1151
1152
1152
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached property so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1177
1177
1178
1178
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # parsebundlespec moved between modules across versions; probe both so
    # the extension keeps working on older Mercurial releases.
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # FIX: was b"not revision specified" — ungrammatical user message
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing set (heads to bundle, heads of the excluded base)
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # derive the changegroup version from the bundlespec when not explicit
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only "none" compression is supported, as documented above
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we benchmark bundle generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1282
1282
1283
1283
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # Each maker below returns a zero-argument callable suitable for timer();
    # every run re-opens the bundle so file/parse state does not leak between
    # iterations.

    def makebench(fn):
        # time `fn` applied to a freshly parsed bundle object
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time draining the parsed bundle in `size`-byte read() calls
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time reading every part's payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines are always benchmarked, whatever the bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the matching benchmark set
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    # run each benchmark with its own timer/formatter so results are
    # reported per title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1408
1408
1409
1409
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the revset once, outside the timed section
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and fully consume the changelog chunk stream
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1445
1445
1446
1446
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so loading it is not part of the measurement
    b'a' in dirstate

    def measure():
        dirstate.hasdir(b'a')
        # drop the cached directory map so every run recomputes it
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(measure)
    fm.end()
1463
1463
1464
1464
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before selecting a benchmark mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over every tracked file
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # time membership tests, half hits and half guaranteed misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: time loading the dirstate from scratch on each run

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1527
1527
1528
1528
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate outside of the timed section
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map so each run rebuilds it
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1547
1547
1548
1548
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed section
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached folded-name map so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1568
1568
1569
1569
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed section
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # dirfoldmap is derived from _dirs, so drop both caches
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1593
1593
1594
1594
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate outside of the timed section
    b"a" in ds

    def setup():
        # force the dirstate to be considered modified so write() actually
        # hits the disk on every run
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # writing the dirstate requires the working-copy lock
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1612
1612
1613
1613
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: fall back to the common ancestor of both sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1635
1635
1636
1636
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run_calculate():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run_calculate)
    fm.end()
1668
1668
1669
1669
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run_mergecopies():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run_mergecopies)
    fm.end()
1692
1692
1693
1693
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def trace_copies():
        copies.pathcopies(ctx1, ctx2)

    timer(trace_copies)
    fm.end()
1707
1707
1708
1708
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def d():
        phases = _phases
        if full:
            # --full: also pay the cost of re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        # asking for the tip's phase triggers the phase-set computation
        phases.phase(repo, tip_rev)

    timer(d)
    fm.end()
1733
1734
1734
1735
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial path objects expose main_path/get_push_variant;
    # fall back to the legacy pushloc attribute otherwise
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots over the wire (not part of the timing)
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # older Mercurial: use the nodemap for membership tests
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1797
1798
1798
1799
1799 @command(
1800 @command(
1800 b'perf::manifest|perfmanifest',
1801 b'perf::manifest|perfmanifest',
1801 [
1802 [
1802 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1803 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1803 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1804 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1804 ]
1805 ]
1805 + formatteropts,
1806 + formatteropts,
1806 b'REV|NODE',
1807 b'REV|NODE',
1807 )
1808 )
1808 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1809 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1809 """benchmark the time to read a manifest from disk and return a usable
1810 """benchmark the time to read a manifest from disk and return a usable
1810 dict-like object
1811 dict-like object
1811
1812
1812 Manifest caches are cleared before retrieval."""
1813 Manifest caches are cleared before retrieval."""
1813 opts = _byteskwargs(opts)
1814 opts = _byteskwargs(opts)
1814 timer, fm = gettimer(ui, opts)
1815 timer, fm = gettimer(ui, opts)
1815 if not manifest_rev:
1816 if not manifest_rev:
1816 ctx = scmutil.revsingle(repo, rev, rev)
1817 ctx = scmutil.revsingle(repo, rev, rev)
1817 t = ctx.manifestnode()
1818 t = ctx.manifestnode()
1818 else:
1819 else:
1819 from mercurial.node import bin
1820 from mercurial.node import bin
1820
1821
1821 if len(rev) == 40:
1822 if len(rev) == 40:
1822 t = bin(rev)
1823 t = bin(rev)
1823 else:
1824 else:
1824 try:
1825 try:
1825 rev = int(rev)
1826 rev = int(rev)
1826
1827
1827 if util.safehasattr(repo.manifestlog, b'getstorage'):
1828 if util.safehasattr(repo.manifestlog, b'getstorage'):
1828 t = repo.manifestlog.getstorage(b'').node(rev)
1829 t = repo.manifestlog.getstorage(b'').node(rev)
1829 else:
1830 else:
1830 t = repo.manifestlog._revlog.lookup(rev)
1831 t = repo.manifestlog._revlog.lookup(rev)
1831 except ValueError:
1832 except ValueError:
1832 raise error.Abort(
1833 raise error.Abort(
1833 b'manifest revision must be integer or full node'
1834 b'manifest revision must be integer or full node'
1834 )
1835 )
1835
1836
1836 def d():
1837 def d():
1837 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1838 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1838 repo.manifestlog[t].read()
1839 repo.manifestlog[t].read()
1839
1840
1840 timer(d)
1841 timer(d)
1841 fm.end()
1842 fm.end()
1842
1843
1843
1844
1844 @command(b'perf::changeset|perfchangeset', formatteropts)
1845 @command(b'perf::changeset|perfchangeset', formatteropts)
1845 def perfchangeset(ui, repo, rev, **opts):
1846 def perfchangeset(ui, repo, rev, **opts):
1846 opts = _byteskwargs(opts)
1847 opts = _byteskwargs(opts)
1847 timer, fm = gettimer(ui, opts)
1848 timer, fm = gettimer(ui, opts)
1848 n = scmutil.revsingle(repo, rev).node()
1849 n = scmutil.revsingle(repo, rev).node()
1849
1850
1850 def d():
1851 def d():
1851 repo.changelog.read(n)
1852 repo.changelog.read(n)
1852 # repo.changelog._cache = None
1853 # repo.changelog._cache = None
1853
1854
1854 timer(d)
1855 timer(d)
1855 fm.end()
1856 fm.end()
1856
1857
1857
1858
1858 @command(b'perf::ignore|perfignore', formatteropts)
1859 @command(b'perf::ignore|perfignore', formatteropts)
1859 def perfignore(ui, repo, **opts):
1860 def perfignore(ui, repo, **opts):
1860 """benchmark operation related to computing ignore"""
1861 """benchmark operation related to computing ignore"""
1861 opts = _byteskwargs(opts)
1862 opts = _byteskwargs(opts)
1862 timer, fm = gettimer(ui, opts)
1863 timer, fm = gettimer(ui, opts)
1863 dirstate = repo.dirstate
1864 dirstate = repo.dirstate
1864
1865
1865 def setupone():
1866 def setupone():
1866 dirstate.invalidate()
1867 dirstate.invalidate()
1867 clearfilecache(dirstate, b'_ignore')
1868 clearfilecache(dirstate, b'_ignore')
1868
1869
1869 def runone():
1870 def runone():
1870 dirstate._ignore
1871 dirstate._ignore
1871
1872
1872 timer(runone, setup=setupone, title=b"load")
1873 timer(runone, setup=setupone, title=b"load")
1873 fm.end()
1874 fm.end()
1874
1875
1875
1876
1876 @command(
1877 @command(
1877 b'perf::index|perfindex',
1878 b'perf::index|perfindex',
1878 [
1879 [
1879 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1880 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1880 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1881 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1881 ]
1882 ]
1882 + formatteropts,
1883 + formatteropts,
1883 )
1884 )
1884 def perfindex(ui, repo, **opts):
1885 def perfindex(ui, repo, **opts):
1885 """benchmark index creation time followed by a lookup
1886 """benchmark index creation time followed by a lookup
1886
1887
1887 The default is to look `tip` up. Depending on the index implementation,
1888 The default is to look `tip` up. Depending on the index implementation,
1888 the revision looked up can matters. For example, an implementation
1889 the revision looked up can matters. For example, an implementation
1889 scanning the index will have a faster lookup time for `--rev tip` than for
1890 scanning the index will have a faster lookup time for `--rev tip` than for
1890 `--rev 0`. The number of looked up revisions and their order can also
1891 `--rev 0`. The number of looked up revisions and their order can also
1891 matters.
1892 matters.
1892
1893
1893 Example of useful set to test:
1894 Example of useful set to test:
1894
1895
1895 * tip
1896 * tip
1896 * 0
1897 * 0
1897 * -10:
1898 * -10:
1898 * :10
1899 * :10
1899 * -10: + :10
1900 * -10: + :10
1900 * :10: + -10:
1901 * :10: + -10:
1901 * -10000:
1902 * -10000:
1902 * -10000: + 0
1903 * -10000: + 0
1903
1904
1904 It is not currently possible to check for lookup of a missing node. For
1905 It is not currently possible to check for lookup of a missing node. For
1905 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1906 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1906 import mercurial.revlog
1907 import mercurial.revlog
1907
1908
1908 opts = _byteskwargs(opts)
1909 opts = _byteskwargs(opts)
1909 timer, fm = gettimer(ui, opts)
1910 timer, fm = gettimer(ui, opts)
1910 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1911 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1911 if opts[b'no_lookup']:
1912 if opts[b'no_lookup']:
1912 if opts['rev']:
1913 if opts['rev']:
1913 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1914 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1914 nodes = []
1915 nodes = []
1915 elif not opts[b'rev']:
1916 elif not opts[b'rev']:
1916 nodes = [repo[b"tip"].node()]
1917 nodes = [repo[b"tip"].node()]
1917 else:
1918 else:
1918 revs = scmutil.revrange(repo, opts[b'rev'])
1919 revs = scmutil.revrange(repo, opts[b'rev'])
1919 cl = repo.changelog
1920 cl = repo.changelog
1920 nodes = [cl.node(r) for r in revs]
1921 nodes = [cl.node(r) for r in revs]
1921
1922
1922 unfi = repo.unfiltered()
1923 unfi = repo.unfiltered()
1923 # find the filecache func directly
1924 # find the filecache func directly
1924 # This avoid polluting the benchmark with the filecache logic
1925 # This avoid polluting the benchmark with the filecache logic
1925 makecl = unfi.__class__.changelog.func
1926 makecl = unfi.__class__.changelog.func
1926
1927
1927 def setup():
1928 def setup():
1928 # probably not necessary, but for good measure
1929 # probably not necessary, but for good measure
1929 clearchangelog(unfi)
1930 clearchangelog(unfi)
1930
1931
1931 def d():
1932 def d():
1932 cl = makecl(unfi)
1933 cl = makecl(unfi)
1933 for n in nodes:
1934 for n in nodes:
1934 cl.rev(n)
1935 cl.rev(n)
1935
1936
1936 timer(d, setup=setup)
1937 timer(d, setup=setup)
1937 fm.end()
1938 fm.end()
1938
1939
1939
1940
1940 @command(
1941 @command(
1941 b'perf::nodemap|perfnodemap',
1942 b'perf::nodemap|perfnodemap',
1942 [
1943 [
1943 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1944 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1944 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1945 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1945 ]
1946 ]
1946 + formatteropts,
1947 + formatteropts,
1947 )
1948 )
1948 def perfnodemap(ui, repo, **opts):
1949 def perfnodemap(ui, repo, **opts):
1949 """benchmark the time necessary to look up revision from a cold nodemap
1950 """benchmark the time necessary to look up revision from a cold nodemap
1950
1951
1951 Depending on the implementation, the amount and order of revision we look
1952 Depending on the implementation, the amount and order of revision we look
1952 up can varies. Example of useful set to test:
1953 up can varies. Example of useful set to test:
1953 * tip
1954 * tip
1954 * 0
1955 * 0
1955 * -10:
1956 * -10:
1956 * :10
1957 * :10
1957 * -10: + :10
1958 * -10: + :10
1958 * :10: + -10:
1959 * :10: + -10:
1959 * -10000:
1960 * -10000:
1960 * -10000: + 0
1961 * -10000: + 0
1961
1962
1962 The command currently focus on valid binary lookup. Benchmarking for
1963 The command currently focus on valid binary lookup. Benchmarking for
1963 hexlookup, prefix lookup and missing lookup would also be valuable.
1964 hexlookup, prefix lookup and missing lookup would also be valuable.
1964 """
1965 """
1965 import mercurial.revlog
1966 import mercurial.revlog
1966
1967
1967 opts = _byteskwargs(opts)
1968 opts = _byteskwargs(opts)
1968 timer, fm = gettimer(ui, opts)
1969 timer, fm = gettimer(ui, opts)
1969 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1970 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1970
1971
1971 unfi = repo.unfiltered()
1972 unfi = repo.unfiltered()
1972 clearcaches = opts[b'clear_caches']
1973 clearcaches = opts[b'clear_caches']
1973 # find the filecache func directly
1974 # find the filecache func directly
1974 # This avoid polluting the benchmark with the filecache logic
1975 # This avoid polluting the benchmark with the filecache logic
1975 makecl = unfi.__class__.changelog.func
1976 makecl = unfi.__class__.changelog.func
1976 if not opts[b'rev']:
1977 if not opts[b'rev']:
1977 raise error.Abort(b'use --rev to specify revisions to look up')
1978 raise error.Abort(b'use --rev to specify revisions to look up')
1978 revs = scmutil.revrange(repo, opts[b'rev'])
1979 revs = scmutil.revrange(repo, opts[b'rev'])
1979 cl = repo.changelog
1980 cl = repo.changelog
1980 nodes = [cl.node(r) for r in revs]
1981 nodes = [cl.node(r) for r in revs]
1981
1982
1982 # use a list to pass reference to a nodemap from one closure to the next
1983 # use a list to pass reference to a nodemap from one closure to the next
1983 nodeget = [None]
1984 nodeget = [None]
1984
1985
1985 def setnodeget():
1986 def setnodeget():
1986 # probably not necessary, but for good measure
1987 # probably not necessary, but for good measure
1987 clearchangelog(unfi)
1988 clearchangelog(unfi)
1988 cl = makecl(unfi)
1989 cl = makecl(unfi)
1989 if util.safehasattr(cl.index, 'get_rev'):
1990 if util.safehasattr(cl.index, 'get_rev'):
1990 nodeget[0] = cl.index.get_rev
1991 nodeget[0] = cl.index.get_rev
1991 else:
1992 else:
1992 nodeget[0] = cl.nodemap.get
1993 nodeget[0] = cl.nodemap.get
1993
1994
1994 def d():
1995 def d():
1995 get = nodeget[0]
1996 get = nodeget[0]
1996 for n in nodes:
1997 for n in nodes:
1997 get(n)
1998 get(n)
1998
1999
1999 setup = None
2000 setup = None
2000 if clearcaches:
2001 if clearcaches:
2001
2002
2002 def setup():
2003 def setup():
2003 setnodeget()
2004 setnodeget()
2004
2005
2005 else:
2006 else:
2006 setnodeget()
2007 setnodeget()
2007 d() # prewarm the data structure
2008 d() # prewarm the data structure
2008 timer(d, setup=setup)
2009 timer(d, setup=setup)
2009 fm.end()
2010 fm.end()
2010
2011
2011
2012
2012 @command(b'perf::startup|perfstartup', formatteropts)
2013 @command(b'perf::startup|perfstartup', formatteropts)
2013 def perfstartup(ui, repo, **opts):
2014 def perfstartup(ui, repo, **opts):
2014 opts = _byteskwargs(opts)
2015 opts = _byteskwargs(opts)
2015 timer, fm = gettimer(ui, opts)
2016 timer, fm = gettimer(ui, opts)
2016
2017
2017 def d():
2018 def d():
2018 if os.name != 'nt':
2019 if os.name != 'nt':
2019 os.system(
2020 os.system(
2020 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
2021 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
2021 )
2022 )
2022 else:
2023 else:
2023 os.environ['HGRCPATH'] = r' '
2024 os.environ['HGRCPATH'] = r' '
2024 os.system("%s version -q > NUL" % sys.argv[0])
2025 os.system("%s version -q > NUL" % sys.argv[0])
2025
2026
2026 timer(d)
2027 timer(d)
2027 fm.end()
2028 fm.end()
2028
2029
2029
2030
2030 def _find_stream_generator(version):
2031 def _find_stream_generator(version):
2031 """find the proper generator function for this stream version"""
2032 """find the proper generator function for this stream version"""
2032 import mercurial.streamclone
2033 import mercurial.streamclone
2033
2034
2034 available = {}
2035 available = {}
2035
2036
2036 # try to fetch a v1 generator
2037 # try to fetch a v1 generator
2037 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
2038 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
2038 if generatev1 is not None:
2039 if generatev1 is not None:
2039
2040
2040 def generate(repo):
2041 def generate(repo):
2041 entries, bytes, data = generatev2(repo, None, None, True)
2042 entries, bytes, data = generatev2(repo, None, None, True)
2042 return data
2043 return data
2043
2044
2044 available[b'v1'] = generatev1
2045 available[b'v1'] = generatev1
2045 # try to fetch a v2 generator
2046 # try to fetch a v2 generator
2046 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
2047 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
2047 if generatev2 is not None:
2048 if generatev2 is not None:
2048
2049
2049 def generate(repo):
2050 def generate(repo):
2050 entries, bytes, data = generatev2(repo, None, None, True)
2051 entries, bytes, data = generatev2(repo, None, None, True)
2051 return data
2052 return data
2052
2053
2053 available[b'v2'] = generate
2054 available[b'v2'] = generate
2054 # try to fetch a v3 generator
2055 # try to fetch a v3 generator
2055 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
2056 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
2056 if generatev3 is not None:
2057 if generatev3 is not None:
2057
2058
2058 def generate(repo):
2059 def generate(repo):
2059 entries, bytes, data = generatev3(repo, None, None, True)
2060 entries, bytes, data = generatev3(repo, None, None, True)
2060 return data
2061 return data
2061
2062
2062 available[b'v3-exp'] = generate
2063 available[b'v3-exp'] = generate
2063
2064
2064 # resolve the request
2065 # resolve the request
2065 if version == b"latest":
2066 if version == b"latest":
2066 # latest is the highest non experimental version
2067 # latest is the highest non experimental version
2067 latest_key = max(v for v in available if b'-exp' not in v)
2068 latest_key = max(v for v in available if b'-exp' not in v)
2068 return available[latest_key]
2069 return available[latest_key]
2069 elif version in available:
2070 elif version in available:
2070 return available[version]
2071 return available[version]
2071 else:
2072 else:
2072 msg = b"unkown or unavailable version: %s"
2073 msg = b"unkown or unavailable version: %s"
2073 msg %= version
2074 msg %= version
2074 hint = b"available versions: %s"
2075 hint = b"available versions: %s"
2075 hint %= b', '.join(sorted(available))
2076 hint %= b', '.join(sorted(available))
2076 raise error.Abort(msg, hint=hint)
2077 raise error.Abort(msg, hint=hint)
2077
2078
2078
2079
2079 @command(
2080 @command(
2080 b'perf::stream-locked-section',
2081 b'perf::stream-locked-section',
2081 [
2082 [
2082 (
2083 (
2083 b'',
2084 b'',
2084 b'stream-version',
2085 b'stream-version',
2085 b'latest',
2086 b'latest',
2086 b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
2087 b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
2087 ),
2088 ),
2088 ]
2089 ]
2089 + formatteropts,
2090 + formatteropts,
2090 )
2091 )
2091 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2092 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2092 """benchmark the initial, repo-locked, section of a stream-clone"""
2093 """benchmark the initial, repo-locked, section of a stream-clone"""
2093
2094
2094 opts = _byteskwargs(opts)
2095 opts = _byteskwargs(opts)
2095 timer, fm = gettimer(ui, opts)
2096 timer, fm = gettimer(ui, opts)
2096
2097
2097 # deletion of the generator may trigger some cleanup that we do not want to
2098 # deletion of the generator may trigger some cleanup that we do not want to
2098 # measure
2099 # measure
2099 result_holder = [None]
2100 result_holder = [None]
2100
2101
2101 def setupone():
2102 def setupone():
2102 result_holder[0] = None
2103 result_holder[0] = None
2103
2104
2104 generate = _find_stream_generator(stream_version)
2105 generate = _find_stream_generator(stream_version)
2105
2106
2106 def runone():
2107 def runone():
2107 # the lock is held for the duration the initialisation
2108 # the lock is held for the duration the initialisation
2108 result_holder[0] = generate(repo)
2109 result_holder[0] = generate(repo)
2109
2110
2110 timer(runone, setup=setupone, title=b"load")
2111 timer(runone, setup=setupone, title=b"load")
2111 fm.end()
2112 fm.end()
2112
2113
2113
2114
2114 @command(
2115 @command(
2115 b'perf::stream-generate',
2116 b'perf::stream-generate',
2116 [
2117 [
2117 (
2118 (
2118 b'',
2119 b'',
2119 b'stream-version',
2120 b'stream-version',
2120 b'latest',
2121 b'latest',
2121 b'stream version to us ("v1", "v2" or "latest", (the default))',
2122 b'stream version to us ("v1", "v2" or "latest", (the default))',
2122 ),
2123 ),
2123 ]
2124 ]
2124 + formatteropts,
2125 + formatteropts,
2125 )
2126 )
2126 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2127 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2127 """benchmark the full generation of a stream clone"""
2128 """benchmark the full generation of a stream clone"""
2128
2129
2129 opts = _byteskwargs(opts)
2130 opts = _byteskwargs(opts)
2130 timer, fm = gettimer(ui, opts)
2131 timer, fm = gettimer(ui, opts)
2131
2132
2132 # deletion of the generator may trigger some cleanup that we do not want to
2133 # deletion of the generator may trigger some cleanup that we do not want to
2133 # measure
2134 # measure
2134
2135
2135 generate = _find_stream_generator(stream_version)
2136 generate = _find_stream_generator(stream_version)
2136
2137
2137 def runone():
2138 def runone():
2138 # the lock is held for the duration the initialisation
2139 # the lock is held for the duration the initialisation
2139 for chunk in generate(repo):
2140 for chunk in generate(repo):
2140 pass
2141 pass
2141
2142
2142 timer(runone, title=b"generate")
2143 timer(runone, title=b"generate")
2143 fm.end()
2144 fm.end()
2144
2145
2145
2146
2146 @command(
2147 @command(
2147 b'perf::stream-consume',
2148 b'perf::stream-consume',
2148 formatteropts,
2149 formatteropts,
2149 )
2150 )
2150 def perf_stream_clone_consume(ui, repo, filename, **opts):
2151 def perf_stream_clone_consume(ui, repo, filename, **opts):
2151 """benchmark the full application of a stream clone
2152 """benchmark the full application of a stream clone
2152
2153
2153 This include the creation of the repository
2154 This include the creation of the repository
2154 """
2155 """
2155 # try except to appease check code
2156 # try except to appease check code
2156 msg = b"mercurial too old, missing necessary module: %s"
2157 msg = b"mercurial too old, missing necessary module: %s"
2157 try:
2158 try:
2158 from mercurial import bundle2
2159 from mercurial import bundle2
2159 except ImportError as exc:
2160 except ImportError as exc:
2160 msg %= _bytestr(exc)
2161 msg %= _bytestr(exc)
2161 raise error.Abort(msg)
2162 raise error.Abort(msg)
2162 try:
2163 try:
2163 from mercurial import exchange
2164 from mercurial import exchange
2164 except ImportError as exc:
2165 except ImportError as exc:
2165 msg %= _bytestr(exc)
2166 msg %= _bytestr(exc)
2166 raise error.Abort(msg)
2167 raise error.Abort(msg)
2167 try:
2168 try:
2168 from mercurial import hg
2169 from mercurial import hg
2169 except ImportError as exc:
2170 except ImportError as exc:
2170 msg %= _bytestr(exc)
2171 msg %= _bytestr(exc)
2171 raise error.Abort(msg)
2172 raise error.Abort(msg)
2172 try:
2173 try:
2173 from mercurial import localrepo
2174 from mercurial import localrepo
2174 except ImportError as exc:
2175 except ImportError as exc:
2175 msg %= _bytestr(exc)
2176 msg %= _bytestr(exc)
2176 raise error.Abort(msg)
2177 raise error.Abort(msg)
2177
2178
2178 opts = _byteskwargs(opts)
2179 opts = _byteskwargs(opts)
2179 timer, fm = gettimer(ui, opts)
2180 timer, fm = gettimer(ui, opts)
2180
2181
2181 # deletion of the generator may trigger some cleanup that we do not want to
2182 # deletion of the generator may trigger some cleanup that we do not want to
2182 # measure
2183 # measure
2183 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2184 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2184 raise error.Abort("not a readable file: %s" % filename)
2185 raise error.Abort("not a readable file: %s" % filename)
2185
2186
2186 run_variables = [None, None]
2187 run_variables = [None, None]
2187
2188
2188 @contextlib.contextmanager
2189 @contextlib.contextmanager
2189 def context():
2190 def context():
2190 with open(filename, mode='rb') as bundle:
2191 with open(filename, mode='rb') as bundle:
2191 with tempfile.TemporaryDirectory() as tmp_dir:
2192 with tempfile.TemporaryDirectory() as tmp_dir:
2192 tmp_dir = fsencode(tmp_dir)
2193 tmp_dir = fsencode(tmp_dir)
2193 run_variables[0] = bundle
2194 run_variables[0] = bundle
2194 run_variables[1] = tmp_dir
2195 run_variables[1] = tmp_dir
2195 yield
2196 yield
2196 run_variables[0] = None
2197 run_variables[0] = None
2197 run_variables[1] = None
2198 run_variables[1] = None
2198
2199
2199 def runone():
2200 def runone():
2200 bundle = run_variables[0]
2201 bundle = run_variables[0]
2201 tmp_dir = run_variables[1]
2202 tmp_dir = run_variables[1]
2202 # only pass ui when no srcrepo
2203 # only pass ui when no srcrepo
2203 localrepo.createrepository(
2204 localrepo.createrepository(
2204 repo.ui, tmp_dir, requirements=repo.requirements
2205 repo.ui, tmp_dir, requirements=repo.requirements
2205 )
2206 )
2206 target = hg.repository(repo.ui, tmp_dir)
2207 target = hg.repository(repo.ui, tmp_dir)
2207 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2208 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2208 # stream v1
2209 # stream v1
2209 if util.safehasattr(gen, 'apply'):
2210 if util.safehasattr(gen, 'apply'):
2210 gen.apply(target)
2211 gen.apply(target)
2211 else:
2212 else:
2212 with target.transaction(b"perf::stream-consume") as tr:
2213 with target.transaction(b"perf::stream-consume") as tr:
2213 bundle2.applybundle(
2214 bundle2.applybundle(
2214 target,
2215 target,
2215 gen,
2216 gen,
2216 tr,
2217 tr,
2217 source=b'unbundle',
2218 source=b'unbundle',
2218 url=filename,
2219 url=filename,
2219 )
2220 )
2220
2221
2221 timer(runone, context=context, title=b"consume")
2222 timer(runone, context=context, title=b"consume")
2222 fm.end()
2223 fm.end()
2223
2224
2224
2225
2225 @command(b'perf::parents|perfparents', formatteropts)
2226 @command(b'perf::parents|perfparents', formatteropts)
2226 def perfparents(ui, repo, **opts):
2227 def perfparents(ui, repo, **opts):
2227 """benchmark the time necessary to fetch one changeset's parents.
2228 """benchmark the time necessary to fetch one changeset's parents.
2228
2229
2229 The fetch is done using the `node identifier`, traversing all object layers
2230 The fetch is done using the `node identifier`, traversing all object layers
2230 from the repository object. The first N revisions will be used for this
2231 from the repository object. The first N revisions will be used for this
2231 benchmark. N is controlled by the ``perf.parentscount`` config option
2232 benchmark. N is controlled by the ``perf.parentscount`` config option
2232 (default: 1000).
2233 (default: 1000).
2233 """
2234 """
2234 opts = _byteskwargs(opts)
2235 opts = _byteskwargs(opts)
2235 timer, fm = gettimer(ui, opts)
2236 timer, fm = gettimer(ui, opts)
2236 # control the number of commits perfparents iterates over
2237 # control the number of commits perfparents iterates over
2237 # experimental config: perf.parentscount
2238 # experimental config: perf.parentscount
2238 count = getint(ui, b"perf", b"parentscount", 1000)
2239 count = getint(ui, b"perf", b"parentscount", 1000)
2239 if len(repo.changelog) < count:
2240 if len(repo.changelog) < count:
2240 raise error.Abort(b"repo needs %d commits for this test" % count)
2241 raise error.Abort(b"repo needs %d commits for this test" % count)
2241 repo = repo.unfiltered()
2242 repo = repo.unfiltered()
2242 nl = [repo.changelog.node(i) for i in _xrange(count)]
2243 nl = [repo.changelog.node(i) for i in _xrange(count)]
2243
2244
2244 def d():
2245 def d():
2245 for n in nl:
2246 for n in nl:
2246 repo.changelog.parents(n)
2247 repo.changelog.parents(n)
2247
2248
2248 timer(d)
2249 timer(d)
2249 fm.end()
2250 fm.end()
2250
2251
2251
2252
2252 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2253 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2253 def perfctxfiles(ui, repo, x, **opts):
2254 def perfctxfiles(ui, repo, x, **opts):
2254 opts = _byteskwargs(opts)
2255 opts = _byteskwargs(opts)
2255 x = int(x)
2256 x = int(x)
2256 timer, fm = gettimer(ui, opts)
2257 timer, fm = gettimer(ui, opts)
2257
2258
2258 def d():
2259 def d():
2259 len(repo[x].files())
2260 len(repo[x].files())
2260
2261
2261 timer(d)
2262 timer(d)
2262 fm.end()
2263 fm.end()
2263
2264
2264
2265
2265 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2266 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2266 def perfrawfiles(ui, repo, x, **opts):
2267 def perfrawfiles(ui, repo, x, **opts):
2267 opts = _byteskwargs(opts)
2268 opts = _byteskwargs(opts)
2268 x = int(x)
2269 x = int(x)
2269 timer, fm = gettimer(ui, opts)
2270 timer, fm = gettimer(ui, opts)
2270 cl = repo.changelog
2271 cl = repo.changelog
2271
2272
2272 def d():
2273 def d():
2273 len(cl.read(x)[3])
2274 len(cl.read(x)[3])
2274
2275
2275 timer(d)
2276 timer(d)
2276 fm.end()
2277 fm.end()
2277
2278
2278
2279
2279 @command(b'perf::lookup|perflookup', formatteropts)
2280 @command(b'perf::lookup|perflookup', formatteropts)
2280 def perflookup(ui, repo, rev, **opts):
2281 def perflookup(ui, repo, rev, **opts):
2281 opts = _byteskwargs(opts)
2282 opts = _byteskwargs(opts)
2282 timer, fm = gettimer(ui, opts)
2283 timer, fm = gettimer(ui, opts)
2283 timer(lambda: len(repo.lookup(rev)))
2284 timer(lambda: len(repo.lookup(rev)))
2284 fm.end()
2285 fm.end()
2285
2286
2286
2287
2287 @command(
2288 @command(
2288 b'perf::linelogedits|perflinelogedits',
2289 b'perf::linelogedits|perflinelogedits',
2289 [
2290 [
2290 (b'n', b'edits', 10000, b'number of edits'),
2291 (b'n', b'edits', 10000, b'number of edits'),
2291 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2292 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2292 ],
2293 ],
2293 norepo=True,
2294 norepo=True,
2294 )
2295 )
2295 def perflinelogedits(ui, **opts):
2296 def perflinelogedits(ui, **opts):
2296 from mercurial import linelog
2297 from mercurial import linelog
2297
2298
2298 opts = _byteskwargs(opts)
2299 opts = _byteskwargs(opts)
2299
2300
2300 edits = opts[b'edits']
2301 edits = opts[b'edits']
2301 maxhunklines = opts[b'max_hunk_lines']
2302 maxhunklines = opts[b'max_hunk_lines']
2302
2303
2303 maxb1 = 100000
2304 maxb1 = 100000
2304 random.seed(0)
2305 random.seed(0)
2305 randint = random.randint
2306 randint = random.randint
2306 currentlines = 0
2307 currentlines = 0
2307 arglist = []
2308 arglist = []
2308 for rev in _xrange(edits):
2309 for rev in _xrange(edits):
2309 a1 = randint(0, currentlines)
2310 a1 = randint(0, currentlines)
2310 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2311 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2311 b1 = randint(0, maxb1)
2312 b1 = randint(0, maxb1)
2312 b2 = randint(b1, b1 + maxhunklines)
2313 b2 = randint(b1, b1 + maxhunklines)
2313 currentlines += (b2 - b1) - (a2 - a1)
2314 currentlines += (b2 - b1) - (a2 - a1)
2314 arglist.append((rev, a1, a2, b1, b2))
2315 arglist.append((rev, a1, a2, b1, b2))
2315
2316
2316 def d():
2317 def d():
2317 ll = linelog.linelog()
2318 ll = linelog.linelog()
2318 for args in arglist:
2319 for args in arglist:
2319 ll.replacelines(*args)
2320 ll.replacelines(*args)
2320
2321
2321 timer, fm = gettimer(ui, opts)
2322 timer, fm = gettimer(ui, opts)
2322 timer(d)
2323 timer(d)
2323 fm.end()
2324 fm.end()
2324
2325
2325
2326
2326 @command(b'perf::revrange|perfrevrange', formatteropts)
2327 @command(b'perf::revrange|perfrevrange', formatteropts)
2327 def perfrevrange(ui, repo, *specs, **opts):
2328 def perfrevrange(ui, repo, *specs, **opts):
2328 opts = _byteskwargs(opts)
2329 opts = _byteskwargs(opts)
2329 timer, fm = gettimer(ui, opts)
2330 timer, fm = gettimer(ui, opts)
2330 revrange = scmutil.revrange
2331 revrange = scmutil.revrange
2331 timer(lambda: len(revrange(repo, specs)))
2332 timer(lambda: len(revrange(repo, specs)))
2332 fm.end()
2333 fm.end()
2333
2334
2334
2335
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node id to a changelog revision number"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable the lazy index parser used by old Mercurial versions
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    # the revlog constructor signature changed over time: try the modern
    # ``radix`` keyword first, then fall back to the older ``indexfile``
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def lookup():
        cl.rev(node)
        # drop the caches so every run performs a real lookup
        clearcaches(cl)

    timer(lookup)
    fm.end()
2355
2356
2356
2357
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, discarding its output"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    follow = opts.get(b'rename')

    def runlog():
        return commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=follow
        )

    # buffer the ui so the command output does not pollute the timing report
    ui.pushbuffer()
    timer(runlog)
    ui.popbuffer()
    fm.end()
2374
2375
2375
2376
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        # from tip down to revision 0 (``stop`` is exclusive)
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(walkbackwards)
    fm.end()
2392
2393
2393
2394
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render through a ui that discards everything so only the template
    # engine itself is measured
    quietui = ui.copy()
    quietui.fout = open(os.devnull, 'wb')
    quietui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(quietui, repo, testedtemplate)

    def render():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
2436
2437
2437
2438
2438 def _displaystats(ui, opts, entries, data):
2439 def _displaystats(ui, opts, entries, data):
2439 # use a second formatter because the data are quite different, not sure
2440 # use a second formatter because the data are quite different, not sure
2440 # how it flies with the templater.
2441 # how it flies with the templater.
2441 fm = ui.formatter(b'perf-stats', opts)
2442 fm = ui.formatter(b'perf-stats', opts)
2442 for key, title in entries:
2443 for key, title in entries:
2443 values = data[key]
2444 values = data[key]
2444 nbvalues = len(data)
2445 nbvalues = len(data)
2445 values.sort()
2446 values.sort()
2446 stats = {
2447 stats = {
2447 'key': key,
2448 'key': key,
2448 'title': title,
2449 'title': title,
2449 'nbitems': len(values),
2450 'nbitems': len(values),
2450 'min': values[0][0],
2451 'min': values[0][0],
2451 '10%': values[(nbvalues * 10) // 100][0],
2452 '10%': values[(nbvalues * 10) // 100][0],
2452 '25%': values[(nbvalues * 25) // 100][0],
2453 '25%': values[(nbvalues * 25) // 100][0],
2453 '50%': values[(nbvalues * 50) // 100][0],
2454 '50%': values[(nbvalues * 50) // 100][0],
2454 '75%': values[(nbvalues * 75) // 100][0],
2455 '75%': values[(nbvalues * 75) // 100][0],
2455 '80%': values[(nbvalues * 80) // 100][0],
2456 '80%': values[(nbvalues * 80) // 100][0],
2456 '85%': values[(nbvalues * 85) // 100][0],
2457 '85%': values[(nbvalues * 85) // 100][0],
2457 '90%': values[(nbvalues * 90) // 100][0],
2458 '90%': values[(nbvalues * 90) // 100][0],
2458 '95%': values[(nbvalues * 95) // 100][0],
2459 '95%': values[(nbvalues * 95) // 100][0],
2459 '99%': values[(nbvalues * 99) // 100][0],
2460 '99%': values[(nbvalues * 99) // 100][0],
2460 'max': values[-1][0],
2461 'max': values[-1][0],
2461 }
2462 }
2462 fm.startitem()
2463 fm.startitem()
2463 fm.data(**stats)
2464 fm.data(**stats)
2464 # make node pretty for the human output
2465 # make node pretty for the human output
2465 fm.plain('### %s (%d items)\n' % (title, len(values)))
2466 fm.plain('### %s (%d items)\n' % (title, len(values)))
2466 lines = [
2467 lines = [
2467 'min',
2468 'min',
2468 '10%',
2469 '10%',
2469 '25%',
2470 '25%',
2470 '50%',
2471 '50%',
2471 '75%',
2472 '75%',
2472 '80%',
2473 '80%',
2473 '85%',
2474 '85%',
2474 '90%',
2475 '90%',
2475 '95%',
2476 '95%',
2476 '99%',
2477 '99%',
2477 'max',
2478 'max',
2478 ]
2479 ]
2479 for l in lines:
2480 for l in lines:
2480 fm.plain('%s: %s\n' % (l, stats[l]))
2481 fm.plain('%s: %s\n' % (l, stats[l]))
2481 fm.end()
2482 fm.end()
2482
2483
2483
2484
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column layout: (header label, %-format referencing a ``data`` key)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns are never filled in
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # value lists consumed by _displaystats() at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            # one output line per (base, p1, p2) triplet
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2665
2666
2666
2667
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing two extra columns (renames, time) are reported
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # value lists consumed by _displaystats() at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits produce interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to rename-detect for this pair
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2805
2806
2806
2807
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark building a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2813
2814
2814
2815
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def loadfncache():
        store.fncache._load()

    timer(loadfncache)
    fm.end()
2826
2827
2827
2828
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def writefncache():
        # force the dirty flag so every run really rewrites the file
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(writefncache)
    tr.close()
    lock.release()
    fm.end()
2846
2847
2847
2848
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently listed in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2861
2862
2862
2863
def _bdiffworker(q, blocks, xdiff, ready, done):
    """thread target used by the threaded bdiff benchmark

    Consumes text pairs from queue ``q`` and diffs each one (with the
    algorithm selected by the ``xdiff``/``blocks`` flags) until a ``None``
    sentinel is read, then waits on the ``ready`` condition so the caller
    can start another round; exits once the ``done`` event is set.
    Every ``q.get`` is matched by a ``q.task_done`` so ``q.join()`` on the
    producer side unblocks correctly.
    """
    while not done.is_set():
        pair = q.get()
        # drain the queue until the None sentinel marks the end of a round
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2878
2879
2879
2880
def _manifestrevision(repo, mnode):
    """return the stored revision text for manifest node ``mnode``"""
    manifestlog = repo.manifestlog

    # prefer the getstorage() API when available; otherwise fall back to
    # the private revlog attribute of older manifestlog implementations
    if util.safehasattr(manifestlog, b'getstorage'):
        store = manifestlog.getstorage(b'')
    else:
        store = manifestlog._revlog

    return store.revision(mnode)
2889
2890
2890
2891
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    pairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Gather the text pairs to diff up front so the timed section only
    # measures the diffing itself.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))
        else:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))

    use_threads = threads > 0
    if not use_threads:

        def d():
            for pair in pairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Spin up the worker pool before timing starts.
        q = queue()
        for _i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for _i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in pairs:
                q.put(pair)
            for _i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if use_threads:
        # Wake the workers one final time so they observe ``done`` and exit.
        done.set()
        for _i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3005
3006
3006
3007
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # [bundle generator, transaction] pair, shared between the
            # setup and timed callbacks through this mutable list
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run before re-reading the bundle from the start
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # BUG FIX: this previously read ``repo.ui.quiet ==
                # orig_quiet`` — a no-op comparison — so the saved quiet
                # level was never restored.  Use assignment.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3086
3087
3087
3088
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Collect the text pairs ahead of time so only diffing is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))
        else:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3166
3167
3167
3168
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # maps a short diff flag to the keyword argument commands.diff expects
    flag_names = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {flag_names[c]: b'1' for c in diffopt}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diff_kwargs)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
3191
3192
3192
3193
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    idx_data = opener.read(indexfile)

    header = struct.unpack(b'>I', idx_data[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort(b'unsupported revlog version: %d' % version)
    inline = header & (1 << 16)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    numrevs = len(rl)

    # sample nodes at fixed positions through the revlog
    node0 = rl.node(0)
    node25 = rl.node(numrevs // 4)
    node50 = rl.node(numrevs // 2)
    node75 = rl.node(numrevs // 4 * 3)
    node100 = rl.node(numrevs - 1)

    allrevs = range(numrevs)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(numrevs)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(idx_data, inline)

    def getentry(revornode):
        index = parse_index_v1(idx_data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(idx_data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(idx_data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                parse_index_v1(idx_data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(idx_data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                parse_index_v1(idx_data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3338
3339
3339
3340
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def run():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last = rllen - 1, startrev - 1
            step = -1 * step
        else:
            first, last = startrev, rllen

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
3388
3389
3389
3390
3390 @command(
3391 @command(
3391 b'perf::revlogwrite|perfrevlogwrite',
3392 b'perf::revlogwrite|perfrevlogwrite',
3392 revlogopts
3393 revlogopts
3393 + formatteropts
3394 + formatteropts
3394 + [
3395 + [
3395 (b's', b'startrev', 1000, b'revision to start writing at'),
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3396 (b'', b'stoprev', -1, b'last revision to write'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3397 (b'', b'count', 3, b'number of passes to perform'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3398 (b'', b'details', False, b'print timing for every revisions tested'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3399 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3400 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3401 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3402 ],
3403 ],
3403 b'-c|-m|FILE',
3404 b'-c|-m|FILE',
3404 )
3405 )
3405 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3406 """Benchmark writing a series of revisions to a revlog.
3407 """Benchmark writing a series of revisions to a revlog.
3407
3408
3408 Possible source values are:
3409 Possible source values are:
3409 * `full`: add from a full text (default).
3410 * `full`: add from a full text (default).
3410 * `parent-1`: add from a delta to the first parent
3411 * `parent-1`: add from a delta to the first parent
3411 * `parent-2`: add from a delta to the second parent if it exists
3412 * `parent-2`: add from a delta to the second parent if it exists
3412 (use a delta from the first parent otherwise)
3413 (use a delta from the first parent otherwise)
3413 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3414 * `storage`: add from the existing precomputed deltas
3415 * `storage`: add from the existing precomputed deltas
3415
3416
3416 Note: This performance command measures performance in a custom way. As a
3417 Note: This performance command measures performance in a custom way. As a
3417 result some of the global configuration of the 'perf' command does not
3418 result some of the global configuration of the 'perf' command does not
3418 apply to it:
3419 apply to it:
3419
3420
3420 * ``pre-run``: disabled
3421 * ``pre-run``: disabled
3421
3422
3422 * ``profile-benchmark``: disabled
3423 * ``profile-benchmark``: disabled
3423
3424
3424 * ``run-limits``: disabled use --count instead
3425 * ``run-limits``: disabled use --count instead
3425 """
3426 """
3426 opts = _byteskwargs(opts)
3427 opts = _byteskwargs(opts)
3427
3428
3428 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3429 rllen = getlen(ui)(rl)
3430 rllen = getlen(ui)(rl)
3430 if startrev < 0:
3431 if startrev < 0:
3431 startrev = rllen + startrev
3432 startrev = rllen + startrev
3432 if stoprev < 0:
3433 if stoprev < 0:
3433 stoprev = rllen + stoprev
3434 stoprev = rllen + stoprev
3434
3435
3435 lazydeltabase = opts['lazydeltabase']
3436 lazydeltabase = opts['lazydeltabase']
3436 source = opts['source']
3437 source = opts['source']
3437 clearcaches = opts['clear_caches']
3438 clearcaches = opts['clear_caches']
3438 validsource = (
3439 validsource = (
3439 b'full',
3440 b'full',
3440 b'parent-1',
3441 b'parent-1',
3441 b'parent-2',
3442 b'parent-2',
3442 b'parent-smallest',
3443 b'parent-smallest',
3443 b'storage',
3444 b'storage',
3444 )
3445 )
3445 if source not in validsource:
3446 if source not in validsource:
3446 raise error.Abort('invalid source type: %s' % source)
3447 raise error.Abort('invalid source type: %s' % source)
3447
3448
3448 ### actually gather results
3449 ### actually gather results
3449 count = opts['count']
3450 count = opts['count']
3450 if count <= 0:
3451 if count <= 0:
3451 raise error.Abort('invalide run count: %d' % count)
3452 raise error.Abort('invalide run count: %d' % count)
3452 allresults = []
3453 allresults = []
3453 for c in range(count):
3454 for c in range(count):
3454 timing = _timeonewrite(
3455 timing = _timeonewrite(
3455 ui,
3456 ui,
3456 rl,
3457 rl,
3457 source,
3458 source,
3458 startrev,
3459 startrev,
3459 stoprev,
3460 stoprev,
3460 c + 1,
3461 c + 1,
3461 lazydeltabase=lazydeltabase,
3462 lazydeltabase=lazydeltabase,
3462 clearcaches=clearcaches,
3463 clearcaches=clearcaches,
3463 )
3464 )
3464 allresults.append(timing)
3465 allresults.append(timing)
3465
3466
3466 ### consolidate the results in a single list
3467 ### consolidate the results in a single list
3467 results = []
3468 results = []
3468 for idx, (rev, t) in enumerate(allresults[0]):
3469 for idx, (rev, t) in enumerate(allresults[0]):
3469 ts = [t]
3470 ts = [t]
3470 for other in allresults[1:]:
3471 for other in allresults[1:]:
3471 orev, ot = other[idx]
3472 orev, ot = other[idx]
3472 assert orev == rev
3473 assert orev == rev
3473 ts.append(ot)
3474 ts.append(ot)
3474 results.append((rev, ts))
3475 results.append((rev, ts))
3475 resultcount = len(results)
3476 resultcount = len(results)
3476
3477
3477 ### Compute and display relevant statistics
3478 ### Compute and display relevant statistics
3478
3479
3479 # get a formatter
3480 # get a formatter
3480 fm = ui.formatter(b'perf', opts)
3481 fm = ui.formatter(b'perf', opts)
3481 displayall = ui.configbool(b"perf", b"all-timing", True)
3482 displayall = ui.configbool(b"perf", b"all-timing", True)
3482
3483
3483 # print individual details if requested
3484 # print individual details if requested
3484 if opts['details']:
3485 if opts['details']:
3485 for idx, item in enumerate(results, 1):
3486 for idx, item in enumerate(results, 1):
3486 rev, data = item
3487 rev, data = item
3487 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3488 formatone(fm, data, title=title, displayall=displayall)
3489 formatone(fm, data, title=title, displayall=displayall)
3489
3490
3490 # sorts results by median time
3491 # sorts results by median time
3491 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3492 # list of (name, index) to display)
3493 # list of (name, index) to display)
3493 relevants = [
3494 relevants = [
3494 ("min", 0),
3495 ("min", 0),
3495 ("10%", resultcount * 10 // 100),
3496 ("10%", resultcount * 10 // 100),
3496 ("25%", resultcount * 25 // 100),
3497 ("25%", resultcount * 25 // 100),
3497 ("50%", resultcount * 70 // 100),
3498 ("50%", resultcount * 70 // 100),
3498 ("75%", resultcount * 75 // 100),
3499 ("75%", resultcount * 75 // 100),
3499 ("90%", resultcount * 90 // 100),
3500 ("90%", resultcount * 90 // 100),
3500 ("95%", resultcount * 95 // 100),
3501 ("95%", resultcount * 95 // 100),
3501 ("99%", resultcount * 99 // 100),
3502 ("99%", resultcount * 99 // 100),
3502 ("99.9%", resultcount * 999 // 1000),
3503 ("99.9%", resultcount * 999 // 1000),
3503 ("99.99%", resultcount * 9999 // 10000),
3504 ("99.99%", resultcount * 9999 // 10000),
3504 ("99.999%", resultcount * 99999 // 100000),
3505 ("99.999%", resultcount * 99999 // 100000),
3505 ("max", -1),
3506 ("max", -1),
3506 ]
3507 ]
3507 if not ui.quiet:
3508 if not ui.quiet:
3508 for name, idx in relevants:
3509 for name, idx in relevants:
3509 data = results[idx]
3510 data = results[idx]
3510 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3511 formatone(fm, data[1], title=title, displayall=displayall)
3512 formatone(fm, data[1], title=title, displayall=displayall)
3512
3513
3513 # XXX summing that many float will not be very precise, we ignore this fact
3514 # XXX summing that many float will not be very precise, we ignore this fact
3514 # for now
3515 # for now
3515 totaltime = []
3516 totaltime = []
3516 for item in allresults:
3517 for item in allresults:
3517 totaltime.append(
3518 totaltime.append(
3518 (
3519 (
3519 sum(x[1][0] for x in item),
3520 sum(x[1][0] for x in item),
3520 sum(x[1][1] for x in item),
3521 sum(x[1][1] for x in item),
3521 sum(x[1][2] for x in item),
3522 sum(x[1][2] for x in item),
3522 )
3523 )
3523 )
3524 )
3524 formatone(
3525 formatone(
3525 fm,
3526 fm,
3526 totaltime,
3527 totaltime,
3527 title="total time (%d revs)" % resultcount,
3528 title="total time (%d revs)" % resultcount,
3528 displayall=displayall,
3529 displayall=displayall,
3529 )
3530 )
3530 fm.end()
3531 fm.end()
3531
3532
3532
3533
3533 class _faketr:
3534 class _faketr:
3534 def add(s, x, y, z=None):
3535 def add(s, x, y, z=None):
3535 return None
3536 return None
3536
3537
3537
3538
3538 def _timeonewrite(
3539 def _timeonewrite(
3539 ui,
3540 ui,
3540 orig,
3541 orig,
3541 source,
3542 source,
3542 startrev,
3543 startrev,
3543 stoprev,
3544 stoprev,
3544 runidx=None,
3545 runidx=None,
3545 lazydeltabase=True,
3546 lazydeltabase=True,
3546 clearcaches=True,
3547 clearcaches=True,
3547 ):
3548 ):
3548 timings = []
3549 timings = []
3549 tr = _faketr()
3550 tr = _faketr()
3550 with _temprevlog(ui, orig, startrev) as dest:
3551 with _temprevlog(ui, orig, startrev) as dest:
3551 if hasattr(dest, "delta_config"):
3552 if hasattr(dest, "delta_config"):
3552 dest.delta_config.lazy_delta_base = lazydeltabase
3553 dest.delta_config.lazy_delta_base = lazydeltabase
3553 else:
3554 else:
3554 dest._lazydeltabase = lazydeltabase
3555 dest._lazydeltabase = lazydeltabase
3555 revs = list(orig.revs(startrev, stoprev))
3556 revs = list(orig.revs(startrev, stoprev))
3556 total = len(revs)
3557 total = len(revs)
3557 topic = 'adding'
3558 topic = 'adding'
3558 if runidx is not None:
3559 if runidx is not None:
3559 topic += ' (run #%d)' % runidx
3560 topic += ' (run #%d)' % runidx
3560 # Support both old and new progress API
3561 # Support both old and new progress API
3561 if util.safehasattr(ui, 'makeprogress'):
3562 if util.safehasattr(ui, 'makeprogress'):
3562 progress = ui.makeprogress(topic, unit='revs', total=total)
3563 progress = ui.makeprogress(topic, unit='revs', total=total)
3563
3564
3564 def updateprogress(pos):
3565 def updateprogress(pos):
3565 progress.update(pos)
3566 progress.update(pos)
3566
3567
3567 def completeprogress():
3568 def completeprogress():
3568 progress.complete()
3569 progress.complete()
3569
3570
3570 else:
3571 else:
3571
3572
3572 def updateprogress(pos):
3573 def updateprogress(pos):
3573 ui.progress(topic, pos, unit='revs', total=total)
3574 ui.progress(topic, pos, unit='revs', total=total)
3574
3575
3575 def completeprogress():
3576 def completeprogress():
3576 ui.progress(topic, None, unit='revs', total=total)
3577 ui.progress(topic, None, unit='revs', total=total)
3577
3578
3578 for idx, rev in enumerate(revs):
3579 for idx, rev in enumerate(revs):
3579 updateprogress(idx)
3580 updateprogress(idx)
3580 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3581 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3581 if clearcaches:
3582 if clearcaches:
3582 dest.index.clearcaches()
3583 dest.index.clearcaches()
3583 dest.clearcaches()
3584 dest.clearcaches()
3584 with timeone() as r:
3585 with timeone() as r:
3585 dest.addrawrevision(*addargs, **addkwargs)
3586 dest.addrawrevision(*addargs, **addkwargs)
3586 timings.append((rev, r[0]))
3587 timings.append((rev, r[0]))
3587 updateprogress(total)
3588 updateprogress(total)
3588 completeprogress()
3589 completeprogress()
3589 return timings
3590 return timings
3590
3591
3591
3592
3592 def _getrevisionseed(orig, rev, tr, source):
3593 def _getrevisionseed(orig, rev, tr, source):
3593 from mercurial.node import nullid
3594 from mercurial.node import nullid
3594
3595
3595 linkrev = orig.linkrev(rev)
3596 linkrev = orig.linkrev(rev)
3596 node = orig.node(rev)
3597 node = orig.node(rev)
3597 p1, p2 = orig.parents(node)
3598 p1, p2 = orig.parents(node)
3598 flags = orig.flags(rev)
3599 flags = orig.flags(rev)
3599 cachedelta = None
3600 cachedelta = None
3600 text = None
3601 text = None
3601
3602
3602 if source == b'full':
3603 if source == b'full':
3603 text = orig.revision(rev)
3604 text = orig.revision(rev)
3604 elif source == b'parent-1':
3605 elif source == b'parent-1':
3605 baserev = orig.rev(p1)
3606 baserev = orig.rev(p1)
3606 cachedelta = (baserev, orig.revdiff(p1, rev))
3607 cachedelta = (baserev, orig.revdiff(p1, rev))
3607 elif source == b'parent-2':
3608 elif source == b'parent-2':
3608 parent = p2
3609 parent = p2
3609 if p2 == nullid:
3610 if p2 == nullid:
3610 parent = p1
3611 parent = p1
3611 baserev = orig.rev(parent)
3612 baserev = orig.rev(parent)
3612 cachedelta = (baserev, orig.revdiff(parent, rev))
3613 cachedelta = (baserev, orig.revdiff(parent, rev))
3613 elif source == b'parent-smallest':
3614 elif source == b'parent-smallest':
3614 p1diff = orig.revdiff(p1, rev)
3615 p1diff = orig.revdiff(p1, rev)
3615 parent = p1
3616 parent = p1
3616 diff = p1diff
3617 diff = p1diff
3617 if p2 != nullid:
3618 if p2 != nullid:
3618 p2diff = orig.revdiff(p2, rev)
3619 p2diff = orig.revdiff(p2, rev)
3619 if len(p1diff) > len(p2diff):
3620 if len(p1diff) > len(p2diff):
3620 parent = p2
3621 parent = p2
3621 diff = p2diff
3622 diff = p2diff
3622 baserev = orig.rev(parent)
3623 baserev = orig.rev(parent)
3623 cachedelta = (baserev, diff)
3624 cachedelta = (baserev, diff)
3624 elif source == b'storage':
3625 elif source == b'storage':
3625 baserev = orig.deltaparent(rev)
3626 baserev = orig.deltaparent(rev)
3626 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3627 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3627
3628
3628 return (
3629 return (
3629 (text, tr, linkrev, p1, p2),
3630 (text, tr, linkrev, p1, p2),
3630 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3631 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3631 )
3632 )
3632
3633
3633
3634
3634 @contextlib.contextmanager
3635 @contextlib.contextmanager
3635 def _temprevlog(ui, orig, truncaterev):
3636 def _temprevlog(ui, orig, truncaterev):
3636 from mercurial import vfs as vfsmod
3637 from mercurial import vfs as vfsmod
3637
3638
3638 if orig._inline:
3639 if orig._inline:
3639 raise error.Abort('not supporting inline revlog (yet)')
3640 raise error.Abort('not supporting inline revlog (yet)')
3640 revlogkwargs = {}
3641 revlogkwargs = {}
3641 k = 'upperboundcomp'
3642 k = 'upperboundcomp'
3642 if util.safehasattr(orig, k):
3643 if util.safehasattr(orig, k):
3643 revlogkwargs[k] = getattr(orig, k)
3644 revlogkwargs[k] = getattr(orig, k)
3644
3645
3645 indexfile = getattr(orig, '_indexfile', None)
3646 indexfile = getattr(orig, '_indexfile', None)
3646 if indexfile is None:
3647 if indexfile is None:
3647 # compatibility with <= hg-5.8
3648 # compatibility with <= hg-5.8
3648 indexfile = getattr(orig, 'indexfile')
3649 indexfile = getattr(orig, 'indexfile')
3649 origindexpath = orig.opener.join(indexfile)
3650 origindexpath = orig.opener.join(indexfile)
3650
3651
3651 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3652 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3652 origdatapath = orig.opener.join(datafile)
3653 origdatapath = orig.opener.join(datafile)
3653 radix = b'revlog'
3654 radix = b'revlog'
3654 indexname = b'revlog.i'
3655 indexname = b'revlog.i'
3655 dataname = b'revlog.d'
3656 dataname = b'revlog.d'
3656
3657
3657 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3658 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3658 try:
3659 try:
3659 # copy the data file in a temporary directory
3660 # copy the data file in a temporary directory
3660 ui.debug('copying data in %s\n' % tmpdir)
3661 ui.debug('copying data in %s\n' % tmpdir)
3661 destindexpath = os.path.join(tmpdir, 'revlog.i')
3662 destindexpath = os.path.join(tmpdir, 'revlog.i')
3662 destdatapath = os.path.join(tmpdir, 'revlog.d')
3663 destdatapath = os.path.join(tmpdir, 'revlog.d')
3663 shutil.copyfile(origindexpath, destindexpath)
3664 shutil.copyfile(origindexpath, destindexpath)
3664 shutil.copyfile(origdatapath, destdatapath)
3665 shutil.copyfile(origdatapath, destdatapath)
3665
3666
3666 # remove the data we want to add again
3667 # remove the data we want to add again
3667 ui.debug('truncating data to be rewritten\n')
3668 ui.debug('truncating data to be rewritten\n')
3668 with open(destindexpath, 'ab') as index:
3669 with open(destindexpath, 'ab') as index:
3669 index.seek(0)
3670 index.seek(0)
3670 index.truncate(truncaterev * orig._io.size)
3671 index.truncate(truncaterev * orig._io.size)
3671 with open(destdatapath, 'ab') as data:
3672 with open(destdatapath, 'ab') as data:
3672 data.seek(0)
3673 data.seek(0)
3673 data.truncate(orig.start(truncaterev))
3674 data.truncate(orig.start(truncaterev))
3674
3675
3675 # instantiate a new revlog from the temporary copy
3676 # instantiate a new revlog from the temporary copy
3676 ui.debug('truncating adding to be rewritten\n')
3677 ui.debug('truncating adding to be rewritten\n')
3677 vfs = vfsmod.vfs(tmpdir)
3678 vfs = vfsmod.vfs(tmpdir)
3678 vfs.options = getattr(orig.opener, 'options', None)
3679 vfs.options = getattr(orig.opener, 'options', None)
3679
3680
3680 try:
3681 try:
3681 dest = revlog(vfs, radix=radix, **revlogkwargs)
3682 dest = revlog(vfs, radix=radix, **revlogkwargs)
3682 except TypeError:
3683 except TypeError:
3683 dest = revlog(
3684 dest = revlog(
3684 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3685 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3685 )
3686 )
3686 if dest._inline:
3687 if dest._inline:
3687 raise error.Abort('not supporting inline revlog (yet)')
3688 raise error.Abort('not supporting inline revlog (yet)')
3688 # make sure internals are initialized
3689 # make sure internals are initialized
3689 dest.revision(len(dest) - 1)
3690 dest.revision(len(dest) - 1)
3690 yield dest
3691 yield dest
3691 del dest, vfs
3692 del dest, vfs
3692 finally:
3693 finally:
3693 shutil.rmtree(tmpdir, True)
3694 shutil.rmtree(tmpdir, True)
3694
3695
3695
3696
3696 @command(
3697 @command(
3697 b'perf::revlogchunks|perfrevlogchunks',
3698 b'perf::revlogchunks|perfrevlogchunks',
3698 revlogopts
3699 revlogopts
3699 + formatteropts
3700 + formatteropts
3700 + [
3701 + [
3701 (b'e', b'engines', b'', b'compression engines to use'),
3702 (b'e', b'engines', b'', b'compression engines to use'),
3702 (b's', b'startrev', 0, b'revision to start at'),
3703 (b's', b'startrev', 0, b'revision to start at'),
3703 ],
3704 ],
3704 b'-c|-m|FILE',
3705 b'-c|-m|FILE',
3705 )
3706 )
3706 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3707 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3707 """Benchmark operations on revlog chunks.
3708 """Benchmark operations on revlog chunks.
3708
3709
3709 Logically, each revlog is a collection of fulltext revisions. However,
3710 Logically, each revlog is a collection of fulltext revisions. However,
3710 stored within each revlog are "chunks" of possibly compressed data. This
3711 stored within each revlog are "chunks" of possibly compressed data. This
3711 data needs to be read and decompressed or compressed and written.
3712 data needs to be read and decompressed or compressed and written.
3712
3713
3713 This command measures the time it takes to read+decompress and recompress
3714 This command measures the time it takes to read+decompress and recompress
3714 chunks in a revlog. It effectively isolates I/O and compression performance.
3715 chunks in a revlog. It effectively isolates I/O and compression performance.
3715 For measurements of higher-level operations like resolving revisions,
3716 For measurements of higher-level operations like resolving revisions,
3716 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3717 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3717 """
3718 """
3718 opts = _byteskwargs(opts)
3719 opts = _byteskwargs(opts)
3719
3720
3720 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3721 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3721
3722
3722 # - _chunkraw was renamed to _getsegmentforrevs
3723 # - _chunkraw was renamed to _getsegmentforrevs
3723 # - _getsegmentforrevs was moved on the inner object
3724 # - _getsegmentforrevs was moved on the inner object
3724 try:
3725 try:
3725 segmentforrevs = rl._inner.get_segment_for_revs
3726 segmentforrevs = rl._inner.get_segment_for_revs
3726 except AttributeError:
3727 except AttributeError:
3727 try:
3728 try:
3728 segmentforrevs = rl._getsegmentforrevs
3729 segmentforrevs = rl._getsegmentforrevs
3729 except AttributeError:
3730 except AttributeError:
3730 segmentforrevs = rl._chunkraw
3731 segmentforrevs = rl._chunkraw
3731
3732
3732 # Verify engines argument.
3733 # Verify engines argument.
3733 if engines:
3734 if engines:
3734 engines = {e.strip() for e in engines.split(b',')}
3735 engines = {e.strip() for e in engines.split(b',')}
3735 for engine in engines:
3736 for engine in engines:
3736 try:
3737 try:
3737 util.compressionengines[engine]
3738 util.compressionengines[engine]
3738 except KeyError:
3739 except KeyError:
3739 raise error.Abort(b'unknown compression engine: %s' % engine)
3740 raise error.Abort(b'unknown compression engine: %s' % engine)
3740 else:
3741 else:
3741 engines = []
3742 engines = []
3742 for e in util.compengines:
3743 for e in util.compengines:
3743 engine = util.compengines[e]
3744 engine = util.compengines[e]
3744 try:
3745 try:
3745 if engine.available():
3746 if engine.available():
3746 engine.revlogcompressor().compress(b'dummy')
3747 engine.revlogcompressor().compress(b'dummy')
3747 engines.append(e)
3748 engines.append(e)
3748 except NotImplementedError:
3749 except NotImplementedError:
3749 pass
3750 pass
3750
3751
3751 revs = list(rl.revs(startrev, len(rl) - 1))
3752 revs = list(rl.revs(startrev, len(rl) - 1))
3752
3753
3753 @contextlib.contextmanager
3754 @contextlib.contextmanager
3754 def reading(rl):
3755 def reading(rl):
3755 if getattr(rl, 'reading', None) is not None:
3756 if getattr(rl, 'reading', None) is not None:
3756 with rl.reading():
3757 with rl.reading():
3757 yield None
3758 yield None
3758 elif rl._inline:
3759 elif rl._inline:
3759 indexfile = getattr(rl, '_indexfile', None)
3760 indexfile = getattr(rl, '_indexfile', None)
3760 if indexfile is None:
3761 if indexfile is None:
3761 # compatibility with <= hg-5.8
3762 # compatibility with <= hg-5.8
3762 indexfile = getattr(rl, 'indexfile')
3763 indexfile = getattr(rl, 'indexfile')
3763 yield getsvfs(repo)(indexfile)
3764 yield getsvfs(repo)(indexfile)
3764 else:
3765 else:
3765 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3766 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3766 yield getsvfs(repo)(datafile)
3767 yield getsvfs(repo)(datafile)
3767
3768
3768 if getattr(rl, 'reading', None) is not None:
3769 if getattr(rl, 'reading', None) is not None:
3769
3770
3770 @contextlib.contextmanager
3771 @contextlib.contextmanager
3771 def lazy_reading(rl):
3772 def lazy_reading(rl):
3772 with rl.reading():
3773 with rl.reading():
3773 yield
3774 yield
3774
3775
3775 else:
3776 else:
3776
3777
3777 @contextlib.contextmanager
3778 @contextlib.contextmanager
3778 def lazy_reading(rl):
3779 def lazy_reading(rl):
3779 yield
3780 yield
3780
3781
3781 def doread():
3782 def doread():
3782 rl.clearcaches()
3783 rl.clearcaches()
3783 for rev in revs:
3784 for rev in revs:
3784 with lazy_reading(rl):
3785 with lazy_reading(rl):
3785 segmentforrevs(rev, rev)
3786 segmentforrevs(rev, rev)
3786
3787
3787 def doreadcachedfh():
3788 def doreadcachedfh():
3788 rl.clearcaches()
3789 rl.clearcaches()
3789 with reading(rl) as fh:
3790 with reading(rl) as fh:
3790 if fh is not None:
3791 if fh is not None:
3791 for rev in revs:
3792 for rev in revs:
3792 segmentforrevs(rev, rev, df=fh)
3793 segmentforrevs(rev, rev, df=fh)
3793 else:
3794 else:
3794 for rev in revs:
3795 for rev in revs:
3795 segmentforrevs(rev, rev)
3796 segmentforrevs(rev, rev)
3796
3797
3797 def doreadbatch():
3798 def doreadbatch():
3798 rl.clearcaches()
3799 rl.clearcaches()
3799 with lazy_reading(rl):
3800 with lazy_reading(rl):
3800 segmentforrevs(revs[0], revs[-1])
3801 segmentforrevs(revs[0], revs[-1])
3801
3802
3802 def doreadbatchcachedfh():
3803 def doreadbatchcachedfh():
3803 rl.clearcaches()
3804 rl.clearcaches()
3804 with reading(rl) as fh:
3805 with reading(rl) as fh:
3805 if fh is not None:
3806 if fh is not None:
3806 segmentforrevs(revs[0], revs[-1], df=fh)
3807 segmentforrevs(revs[0], revs[-1], df=fh)
3807 else:
3808 else:
3808 segmentforrevs(revs[0], revs[-1])
3809 segmentforrevs(revs[0], revs[-1])
3809
3810
3810 def dochunk():
3811 def dochunk():
3811 rl.clearcaches()
3812 rl.clearcaches()
3812 # chunk used to be available directly on the revlog
3813 # chunk used to be available directly on the revlog
3813 _chunk = getattr(rl, '_inner', rl)._chunk
3814 _chunk = getattr(rl, '_inner', rl)._chunk
3814 with reading(rl) as fh:
3815 with reading(rl) as fh:
3815 if fh is not None:
3816 if fh is not None:
3816 for rev in revs:
3817 for rev in revs:
3817 _chunk(rev, df=fh)
3818 _chunk(rev, df=fh)
3818 else:
3819 else:
3819 for rev in revs:
3820 for rev in revs:
3820 _chunk(rev)
3821 _chunk(rev)
3821
3822
3822 chunks = [None]
3823 chunks = [None]
3823
3824
3824 def dochunkbatch():
3825 def dochunkbatch():
3825 rl.clearcaches()
3826 rl.clearcaches()
3826 _chunks = getattr(rl, '_inner', rl)._chunks
3827 _chunks = getattr(rl, '_inner', rl)._chunks
3827 with reading(rl) as fh:
3828 with reading(rl) as fh:
3828 if fh is not None:
3829 if fh is not None:
3829 # Save chunks as a side-effect.
3830 # Save chunks as a side-effect.
3830 chunks[0] = _chunks(revs, df=fh)
3831 chunks[0] = _chunks(revs, df=fh)
3831 else:
3832 else:
3832 # Save chunks as a side-effect.
3833 # Save chunks as a side-effect.
3833 chunks[0] = _chunks(revs)
3834 chunks[0] = _chunks(revs)
3834
3835
3835 def docompress(compressor):
3836 def docompress(compressor):
3836 rl.clearcaches()
3837 rl.clearcaches()
3837
3838
3838 compressor_holder = getattr(rl, '_inner', rl)
3839 compressor_holder = getattr(rl, '_inner', rl)
3839
3840
3840 try:
3841 try:
3841 # Swap in the requested compression engine.
3842 # Swap in the requested compression engine.
3842 oldcompressor = compressor_holder._compressor
3843 oldcompressor = compressor_holder._compressor
3843 compressor_holder._compressor = compressor
3844 compressor_holder._compressor = compressor
3844 for chunk in chunks[0]:
3845 for chunk in chunks[0]:
3845 rl.compress(chunk)
3846 rl.compress(chunk)
3846 finally:
3847 finally:
3847 compressor_holder._compressor = oldcompressor
3848 compressor_holder._compressor = oldcompressor
3848
3849
3849 benches = [
3850 benches = [
3850 (lambda: doread(), b'read'),
3851 (lambda: doread(), b'read'),
3851 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3852 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3852 (lambda: doreadbatch(), b'read batch'),
3853 (lambda: doreadbatch(), b'read batch'),
3853 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3854 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3854 (lambda: dochunk(), b'chunk'),
3855 (lambda: dochunk(), b'chunk'),
3855 (lambda: dochunkbatch(), b'chunk batch'),
3856 (lambda: dochunkbatch(), b'chunk batch'),
3856 ]
3857 ]
3857
3858
3858 for engine in sorted(engines):
3859 for engine in sorted(engines):
3859 compressor = util.compengines[engine].revlogcompressor()
3860 compressor = util.compengines[engine].revlogcompressor()
3860 benches.append(
3861 benches.append(
3861 (
3862 (
3862 functools.partial(docompress, compressor),
3863 functools.partial(docompress, compressor),
3863 b'compress w/ %s' % engine,
3864 b'compress w/ %s' % engine,
3864 )
3865 )
3865 )
3866 )
3866
3867
3867 for fn, title in benches:
3868 for fn, title in benches:
3868 timer, fm = gettimer(ui, opts)
3869 timer, fm = gettimer(ui, opts)
3869 timer(fn, title=title)
3870 timer(fn, title=title)
3870 fm.end()
3871 fm.end()
3871
3872
3872
3873
3873 @command(
3874 @command(
3874 b'perf::revlogrevision|perfrevlogrevision',
3875 b'perf::revlogrevision|perfrevlogrevision',
3875 revlogopts
3876 revlogopts
3876 + formatteropts
3877 + formatteropts
3877 + [(b'', b'cache', False, b'use caches instead of clearing')],
3878 + [(b'', b'cache', False, b'use caches instead of clearing')],
3878 b'-c|-m|FILE REV',
3879 b'-c|-m|FILE REV',
3879 )
3880 )
3880 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3881 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3881 """Benchmark obtaining a revlog revision.
3882 """Benchmark obtaining a revlog revision.
3882
3883
3883 Obtaining a revlog revision consists of roughly the following steps:
3884 Obtaining a revlog revision consists of roughly the following steps:
3884
3885
3885 1. Compute the delta chain
3886 1. Compute the delta chain
3886 2. Slice the delta chain if applicable
3887 2. Slice the delta chain if applicable
3887 3. Obtain the raw chunks for that delta chain
3888 3. Obtain the raw chunks for that delta chain
3888 4. Decompress each raw chunk
3889 4. Decompress each raw chunk
3889 5. Apply binary patches to obtain fulltext
3890 5. Apply binary patches to obtain fulltext
3890 6. Verify hash of fulltext
3891 6. Verify hash of fulltext
3891
3892
3892 This command measures the time spent in each of these phases.
3893 This command measures the time spent in each of these phases.
3893 """
3894 """
3894 opts = _byteskwargs(opts)
3895 opts = _byteskwargs(opts)
3895
3896
3896 if opts.get(b'changelog') or opts.get(b'manifest'):
3897 if opts.get(b'changelog') or opts.get(b'manifest'):
3897 file_, rev = None, file_
3898 file_, rev = None, file_
3898 elif rev is None:
3899 elif rev is None:
3899 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3900 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3900
3901
3901 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3902 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3902
3903
3903 # _chunkraw was renamed to _getsegmentforrevs.
3904 # _chunkraw was renamed to _getsegmentforrevs.
3904 try:
3905 try:
3905 segmentforrevs = r._inner.get_segment_for_revs
3906 segmentforrevs = r._inner.get_segment_for_revs
3906 except AttributeError:
3907 except AttributeError:
3907 try:
3908 try:
3908 segmentforrevs = r._getsegmentforrevs
3909 segmentforrevs = r._getsegmentforrevs
3909 except AttributeError:
3910 except AttributeError:
3910 segmentforrevs = r._chunkraw
3911 segmentforrevs = r._chunkraw
3911
3912
3912 node = r.lookup(rev)
3913 node = r.lookup(rev)
3913 rev = r.rev(node)
3914 rev = r.rev(node)
3914
3915
3915 if getattr(r, 'reading', None) is not None:
3916 if getattr(r, 'reading', None) is not None:
3916
3917
3917 @contextlib.contextmanager
3918 @contextlib.contextmanager
3918 def lazy_reading(r):
3919 def lazy_reading(r):
3919 with r.reading():
3920 with r.reading():
3920 yield
3921 yield
3921
3922
3922 else:
3923 else:
3923
3924
3924 @contextlib.contextmanager
3925 @contextlib.contextmanager
3925 def lazy_reading(r):
3926 def lazy_reading(r):
3926 yield
3927 yield
3927
3928
3928 def getrawchunks(data, chain):
3929 def getrawchunks(data, chain):
3929 start = r.start
3930 start = r.start
3930 length = r.length
3931 length = r.length
3931 inline = r._inline
3932 inline = r._inline
3932 try:
3933 try:
3933 iosize = r.index.entry_size
3934 iosize = r.index.entry_size
3934 except AttributeError:
3935 except AttributeError:
3935 iosize = r._io.size
3936 iosize = r._io.size
3936 buffer = util.buffer
3937 buffer = util.buffer
3937
3938
3938 chunks = []
3939 chunks = []
3939 ladd = chunks.append
3940 ladd = chunks.append
3940 for idx, item in enumerate(chain):
3941 for idx, item in enumerate(chain):
3941 offset = start(item[0])
3942 offset = start(item[0])
3942 bits = data[idx]
3943 bits = data[idx]
3943 for rev in item:
3944 for rev in item:
3944 chunkstart = start(rev)
3945 chunkstart = start(rev)
3945 if inline:
3946 if inline:
3946 chunkstart += (rev + 1) * iosize
3947 chunkstart += (rev + 1) * iosize
3947 chunklength = length(rev)
3948 chunklength = length(rev)
3948 ladd(buffer(bits, chunkstart - offset, chunklength))
3949 ladd(buffer(bits, chunkstart - offset, chunklength))
3949
3950
3950 return chunks
3951 return chunks
3951
3952
3952 def dodeltachain(rev):
3953 def dodeltachain(rev):
3953 if not cache:
3954 if not cache:
3954 r.clearcaches()
3955 r.clearcaches()
3955 r._deltachain(rev)
3956 r._deltachain(rev)
3956
3957
3957 def doread(chain):
3958 def doread(chain):
3958 if not cache:
3959 if not cache:
3959 r.clearcaches()
3960 r.clearcaches()
3960 for item in slicedchain:
3961 for item in slicedchain:
3961 with lazy_reading(r):
3962 with lazy_reading(r):
3962 segmentforrevs(item[0], item[-1])
3963 segmentforrevs(item[0], item[-1])
3963
3964
3964 def doslice(r, chain, size):
3965 def doslice(r, chain, size):
3965 for s in slicechunk(r, chain, targetsize=size):
3966 for s in slicechunk(r, chain, targetsize=size):
3966 pass
3967 pass
3967
3968
3968 def dorawchunks(data, chain):
3969 def dorawchunks(data, chain):
3969 if not cache:
3970 if not cache:
3970 r.clearcaches()
3971 r.clearcaches()
3971 getrawchunks(data, chain)
3972 getrawchunks(data, chain)
3972
3973
3973 def dodecompress(chunks):
3974 def dodecompress(chunks):
3974 decomp = r.decompress
3975 decomp = r.decompress
3975 for chunk in chunks:
3976 for chunk in chunks:
3976 decomp(chunk)
3977 decomp(chunk)
3977
3978
3978 def dopatch(text, bins):
3979 def dopatch(text, bins):
3979 if not cache:
3980 if not cache:
3980 r.clearcaches()
3981 r.clearcaches()
3981 mdiff.patches(text, bins)
3982 mdiff.patches(text, bins)
3982
3983
3983 def dohash(text):
3984 def dohash(text):
3984 if not cache:
3985 if not cache:
3985 r.clearcaches()
3986 r.clearcaches()
3986 r.checkhash(text, node, rev=rev)
3987 r.checkhash(text, node, rev=rev)
3987
3988
3988 def dorevision():
3989 def dorevision():
3989 if not cache:
3990 if not cache:
3990 r.clearcaches()
3991 r.clearcaches()
3991 r.revision(node)
3992 r.revision(node)
3992
3993
3993 try:
3994 try:
3994 from mercurial.revlogutils.deltas import slicechunk
3995 from mercurial.revlogutils.deltas import slicechunk
3995 except ImportError:
3996 except ImportError:
3996 slicechunk = getattr(revlog, '_slicechunk', None)
3997 slicechunk = getattr(revlog, '_slicechunk', None)
3997
3998
3998 size = r.length(rev)
3999 size = r.length(rev)
3999 chain = r._deltachain(rev)[0]
4000 chain = r._deltachain(rev)[0]
4000
4001
4001 with_sparse_read = False
4002 with_sparse_read = False
4002 if hasattr(r, 'data_config'):
4003 if hasattr(r, 'data_config'):
4003 with_sparse_read = r.data_config.with_sparse_read
4004 with_sparse_read = r.data_config.with_sparse_read
4004 elif hasattr(r, '_withsparseread'):
4005 elif hasattr(r, '_withsparseread'):
4005 with_sparse_read = r._withsparseread
4006 with_sparse_read = r._withsparseread
4006 if with_sparse_read:
4007 if with_sparse_read:
4007 slicedchain = (chain,)
4008 slicedchain = (chain,)
4008 else:
4009 else:
4009 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4010 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4010 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4011 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4011 rawchunks = getrawchunks(data, slicedchain)
4012 rawchunks = getrawchunks(data, slicedchain)
4012 bins = r._inner._chunks(chain)
4013 bins = r._inner._chunks(chain)
4013 text = bytes(bins[0])
4014 text = bytes(bins[0])
4014 bins = bins[1:]
4015 bins = bins[1:]
4015 text = mdiff.patches(text, bins)
4016 text = mdiff.patches(text, bins)
4016
4017
4017 benches = [
4018 benches = [
4018 (lambda: dorevision(), b'full'),
4019 (lambda: dorevision(), b'full'),
4019 (lambda: dodeltachain(rev), b'deltachain'),
4020 (lambda: dodeltachain(rev), b'deltachain'),
4020 (lambda: doread(chain), b'read'),
4021 (lambda: doread(chain), b'read'),
4021 ]
4022 ]
4022
4023
4023 if with_sparse_read:
4024 if with_sparse_read:
4024 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4025 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4025 benches.append(slicing)
4026 benches.append(slicing)
4026
4027
4027 benches.extend(
4028 benches.extend(
4028 [
4029 [
4029 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4030 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4030 (lambda: dodecompress(rawchunks), b'decompress'),
4031 (lambda: dodecompress(rawchunks), b'decompress'),
4031 (lambda: dopatch(text, bins), b'patch'),
4032 (lambda: dopatch(text, bins), b'patch'),
4032 (lambda: dohash(text), b'hash'),
4033 (lambda: dohash(text), b'hash'),
4033 ]
4034 ]
4034 )
4035 )
4035
4036
4036 timer, fm = gettimer(ui, opts)
4037 timer, fm = gettimer(ui, opts)
4037 for fn, title in benches:
4038 for fn, title in benches:
4038 timer(fn, title=title)
4039 timer(fn, title=title)
4039 fm.end()
4040 fm.end()
4040
4041
4041
4042
4042 @command(
4043 @command(
4043 b'perf::revset|perfrevset',
4044 b'perf::revset|perfrevset',
4044 [
4045 [
4045 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4046 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4046 (b'', b'contexts', False, b'obtain changectx for each revision'),
4047 (b'', b'contexts', False, b'obtain changectx for each revision'),
4047 ]
4048 ]
4048 + formatteropts,
4049 + formatteropts,
4049 b"REVSET",
4050 b"REVSET",
4050 )
4051 )
4051 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4052 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4052 """benchmark the execution time of a revset
4053 """benchmark the execution time of a revset
4053
4054
4054 Use the --clean option if need to evaluate the impact of build volatile
4055 Use the --clean option if need to evaluate the impact of build volatile
4055 revisions set cache on the revset execution. Volatile cache hold filtered
4056 revisions set cache on the revset execution. Volatile cache hold filtered
4056 and obsolete related cache."""
4057 and obsolete related cache."""
4057 opts = _byteskwargs(opts)
4058 opts = _byteskwargs(opts)
4058
4059
4059 timer, fm = gettimer(ui, opts)
4060 timer, fm = gettimer(ui, opts)
4060
4061
4061 def d():
4062 def d():
4062 if clear:
4063 if clear:
4063 repo.invalidatevolatilesets()
4064 repo.invalidatevolatilesets()
4064 if contexts:
4065 if contexts:
4065 for ctx in repo.set(expr):
4066 for ctx in repo.set(expr):
4066 pass
4067 pass
4067 else:
4068 else:
4068 for r in repo.revs(expr):
4069 for r in repo.revs(expr):
4069 pass
4070 pass
4070
4071
4071 timer(d)
4072 timer(d)
4072 fm.end()
4073 fm.end()
4073
4074
4074
4075
4075 @command(
4076 @command(
4076 b'perf::volatilesets|perfvolatilesets',
4077 b'perf::volatilesets|perfvolatilesets',
4077 [
4078 [
4078 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4079 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4079 ]
4080 ]
4080 + formatteropts,
4081 + formatteropts,
4081 )
4082 )
4082 def perfvolatilesets(ui, repo, *names, **opts):
4083 def perfvolatilesets(ui, repo, *names, **opts):
4083 """benchmark the computation of various volatile set
4084 """benchmark the computation of various volatile set
4084
4085
4085 Volatile set computes element related to filtering and obsolescence."""
4086 Volatile set computes element related to filtering and obsolescence."""
4086 opts = _byteskwargs(opts)
4087 opts = _byteskwargs(opts)
4087 timer, fm = gettimer(ui, opts)
4088 timer, fm = gettimer(ui, opts)
4088 repo = repo.unfiltered()
4089 repo = repo.unfiltered()
4089
4090
4090 def getobs(name):
4091 def getobs(name):
4091 def d():
4092 def d():
4092 repo.invalidatevolatilesets()
4093 repo.invalidatevolatilesets()
4093 if opts[b'clear_obsstore']:
4094 if opts[b'clear_obsstore']:
4094 clearfilecache(repo, b'obsstore')
4095 clearfilecache(repo, b'obsstore')
4095 obsolete.getrevs(repo, name)
4096 obsolete.getrevs(repo, name)
4096
4097
4097 return d
4098 return d
4098
4099
4099 allobs = sorted(obsolete.cachefuncs)
4100 allobs = sorted(obsolete.cachefuncs)
4100 if names:
4101 if names:
4101 allobs = [n for n in allobs if n in names]
4102 allobs = [n for n in allobs if n in names]
4102
4103
4103 for name in allobs:
4104 for name in allobs:
4104 timer(getobs(name), title=name)
4105 timer(getobs(name), title=name)
4105
4106
4106 def getfiltered(name):
4107 def getfiltered(name):
4107 def d():
4108 def d():
4108 repo.invalidatevolatilesets()
4109 repo.invalidatevolatilesets()
4109 if opts[b'clear_obsstore']:
4110 if opts[b'clear_obsstore']:
4110 clearfilecache(repo, b'obsstore')
4111 clearfilecache(repo, b'obsstore')
4111 repoview.filterrevs(repo, name)
4112 repoview.filterrevs(repo, name)
4112
4113
4113 return d
4114 return d
4114
4115
4115 allfilter = sorted(repoview.filtertable)
4116 allfilter = sorted(repoview.filtertable)
4116 if names:
4117 if names:
4117 allfilter = [n for n in allfilter if n in names]
4118 allfilter = [n for n in allfilter if n in names]
4118
4119
4119 for name in allfilter:
4120 for name in allfilter:
4120 timer(getfiltered(name), title=name)
4121 timer(getfiltered(name), title=name)
4121 fm.end()
4122 fm.end()
4122
4123
4123
4124
4124 @command(
4125 @command(
4125 b'perf::branchmap|perfbranchmap',
4126 b'perf::branchmap|perfbranchmap',
4126 [
4127 [
4127 (b'f', b'full', False, b'Includes build time of subset'),
4128 (b'f', b'full', False, b'Includes build time of subset'),
4128 (
4129 (
4129 b'',
4130 b'',
4130 b'clear-revbranch',
4131 b'clear-revbranch',
4131 False,
4132 False,
4132 b'purge the revbranch cache between computation',
4133 b'purge the revbranch cache between computation',
4133 ),
4134 ),
4134 ]
4135 ]
4135 + formatteropts,
4136 + formatteropts,
4136 )
4137 )
4137 def perfbranchmap(ui, repo, *filternames, **opts):
4138 def perfbranchmap(ui, repo, *filternames, **opts):
4138 """benchmark the update of a branchmap
4139 """benchmark the update of a branchmap
4139
4140
4140 This benchmarks the full repo.branchmap() call with read and write disabled
4141 This benchmarks the full repo.branchmap() call with read and write disabled
4141 """
4142 """
4142 opts = _byteskwargs(opts)
4143 opts = _byteskwargs(opts)
4143 full = opts.get(b"full", False)
4144 full = opts.get(b"full", False)
4144 clear_revbranch = opts.get(b"clear_revbranch", False)
4145 clear_revbranch = opts.get(b"clear_revbranch", False)
4145 timer, fm = gettimer(ui, opts)
4146 timer, fm = gettimer(ui, opts)
4146
4147
4147 def getbranchmap(filtername):
4148 def getbranchmap(filtername):
4148 """generate a benchmark function for the filtername"""
4149 """generate a benchmark function for the filtername"""
4149 if filtername is None:
4150 if filtername is None:
4150 view = repo
4151 view = repo
4151 else:
4152 else:
4152 view = repo.filtered(filtername)
4153 view = repo.filtered(filtername)
4153 if util.safehasattr(view._branchcaches, '_per_filter'):
4154 if util.safehasattr(view._branchcaches, '_per_filter'):
4154 filtered = view._branchcaches._per_filter
4155 filtered = view._branchcaches._per_filter
4155 else:
4156 else:
4156 # older versions
4157 # older versions
4157 filtered = view._branchcaches
4158 filtered = view._branchcaches
4158
4159
4159 def d():
4160 def d():
4160 if clear_revbranch:
4161 if clear_revbranch:
4161 repo.revbranchcache()._clear()
4162 repo.revbranchcache()._clear()
4162 if full:
4163 if full:
4163 view._branchcaches.clear()
4164 view._branchcaches.clear()
4164 else:
4165 else:
4165 filtered.pop(filtername, None)
4166 filtered.pop(filtername, None)
4166 view.branchmap()
4167 view.branchmap()
4167
4168
4168 return d
4169 return d
4169
4170
4170 # add filter in smaller subset to bigger subset
4171 # add filter in smaller subset to bigger subset
4171 possiblefilters = set(repoview.filtertable)
4172 possiblefilters = set(repoview.filtertable)
4172 if filternames:
4173 if filternames:
4173 possiblefilters &= set(filternames)
4174 possiblefilters &= set(filternames)
4174 subsettable = getbranchmapsubsettable()
4175 subsettable = getbranchmapsubsettable()
4175 allfilters = []
4176 allfilters = []
4176 while possiblefilters:
4177 while possiblefilters:
4177 for name in possiblefilters:
4178 for name in possiblefilters:
4178 subset = subsettable.get(name)
4179 subset = subsettable.get(name)
4179 if subset not in possiblefilters:
4180 if subset not in possiblefilters:
4180 break
4181 break
4181 else:
4182 else:
4182 assert False, b'subset cycle %s!' % possiblefilters
4183 assert False, b'subset cycle %s!' % possiblefilters
4183 allfilters.append(name)
4184 allfilters.append(name)
4184 possiblefilters.remove(name)
4185 possiblefilters.remove(name)
4185
4186
4186 # warm the cache
4187 # warm the cache
4187 if not full:
4188 if not full:
4188 for name in allfilters:
4189 for name in allfilters:
4189 repo.filtered(name).branchmap()
4190 repo.filtered(name).branchmap()
4190 if not filternames or b'unfiltered' in filternames:
4191 if not filternames or b'unfiltered' in filternames:
4191 # add unfiltered
4192 # add unfiltered
4192 allfilters.append(None)
4193 allfilters.append(None)
4193
4194
4194 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4195 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4195 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4196 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4196 branchcacheread.set(classmethod(lambda *args: None))
4197 branchcacheread.set(classmethod(lambda *args: None))
4197 else:
4198 else:
4198 # older versions
4199 # older versions
4199 branchcacheread = safeattrsetter(branchmap, b'read')
4200 branchcacheread = safeattrsetter(branchmap, b'read')
4200 branchcacheread.set(lambda *args: None)
4201 branchcacheread.set(lambda *args: None)
4201 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4202 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4202 branchcachewrite.set(lambda *args: None)
4203 branchcachewrite.set(lambda *args: None)
4203 try:
4204 try:
4204 for name in allfilters:
4205 for name in allfilters:
4205 printname = name
4206 printname = name
4206 if name is None:
4207 if name is None:
4207 printname = b'unfiltered'
4208 printname = b'unfiltered'
4208 timer(getbranchmap(name), title=printname)
4209 timer(getbranchmap(name), title=printname)
4209 finally:
4210 finally:
4210 branchcacheread.restore()
4211 branchcacheread.restore()
4211 branchcachewrite.restore()
4212 branchcachewrite.restore()
4212 fm.end()
4213 fm.end()
4213
4214
4214
4215
4215 @command(
4216 @command(
4216 b'perf::branchmapupdate|perfbranchmapupdate',
4217 b'perf::branchmapupdate|perfbranchmapupdate',
4217 [
4218 [
4218 (b'', b'base', [], b'subset of revision to start from'),
4219 (b'', b'base', [], b'subset of revision to start from'),
4219 (b'', b'target', [], b'subset of revision to end with'),
4220 (b'', b'target', [], b'subset of revision to end with'),
4220 (b'', b'clear-caches', False, b'clear cache between each runs'),
4221 (b'', b'clear-caches', False, b'clear cache between each runs'),
4221 ]
4222 ]
4222 + formatteropts,
4223 + formatteropts,
4223 )
4224 )
4224 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4225 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4225 """benchmark branchmap update from for <base> revs to <target> revs
4226 """benchmark branchmap update from for <base> revs to <target> revs
4226
4227
4227 If `--clear-caches` is passed, the following items will be reset before
4228 If `--clear-caches` is passed, the following items will be reset before
4228 each update:
4229 each update:
4229 * the changelog instance and associated indexes
4230 * the changelog instance and associated indexes
4230 * the rev-branch-cache instance
4231 * the rev-branch-cache instance
4231
4232
4232 Examples:
4233 Examples:
4233
4234
4234 # update for the one last revision
4235 # update for the one last revision
4235 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4236 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4236
4237
4237 $ update for change coming with a new branch
4238 $ update for change coming with a new branch
4238 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4239 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4239 """
4240 """
4240 from mercurial import branchmap
4241 from mercurial import branchmap
4241 from mercurial import repoview
4242 from mercurial import repoview
4242
4243
4243 opts = _byteskwargs(opts)
4244 opts = _byteskwargs(opts)
4244 timer, fm = gettimer(ui, opts)
4245 timer, fm = gettimer(ui, opts)
4245 clearcaches = opts[b'clear_caches']
4246 clearcaches = opts[b'clear_caches']
4246 unfi = repo.unfiltered()
4247 unfi = repo.unfiltered()
4247 x = [None] # used to pass data between closure
4248 x = [None] # used to pass data between closure
4248
4249
4249 # we use a `list` here to avoid possible side effect from smartset
4250 # we use a `list` here to avoid possible side effect from smartset
4250 baserevs = list(scmutil.revrange(repo, base))
4251 baserevs = list(scmutil.revrange(repo, base))
4251 targetrevs = list(scmutil.revrange(repo, target))
4252 targetrevs = list(scmutil.revrange(repo, target))
4252 if not baserevs:
4253 if not baserevs:
4253 raise error.Abort(b'no revisions selected for --base')
4254 raise error.Abort(b'no revisions selected for --base')
4254 if not targetrevs:
4255 if not targetrevs:
4255 raise error.Abort(b'no revisions selected for --target')
4256 raise error.Abort(b'no revisions selected for --target')
4256
4257
4257 # make sure the target branchmap also contains the one in the base
4258 # make sure the target branchmap also contains the one in the base
4258 targetrevs = list(set(baserevs) | set(targetrevs))
4259 targetrevs = list(set(baserevs) | set(targetrevs))
4259 targetrevs.sort()
4260 targetrevs.sort()
4260
4261
4261 cl = repo.changelog
4262 cl = repo.changelog
4262 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4263 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4263 allbaserevs.sort()
4264 allbaserevs.sort()
4264 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4265 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4265
4266
4266 newrevs = list(alltargetrevs.difference(allbaserevs))
4267 newrevs = list(alltargetrevs.difference(allbaserevs))
4267 newrevs.sort()
4268 newrevs.sort()
4268
4269
4269 allrevs = frozenset(unfi.changelog.revs())
4270 allrevs = frozenset(unfi.changelog.revs())
4270 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4271 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4271 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4272 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4272
4273
4273 def basefilter(repo, visibilityexceptions=None):
4274 def basefilter(repo, visibilityexceptions=None):
4274 return basefilterrevs
4275 return basefilterrevs
4275
4276
4276 def targetfilter(repo, visibilityexceptions=None):
4277 def targetfilter(repo, visibilityexceptions=None):
4277 return targetfilterrevs
4278 return targetfilterrevs
4278
4279
4279 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4280 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4280 ui.status(msg % (len(allbaserevs), len(newrevs)))
4281 ui.status(msg % (len(allbaserevs), len(newrevs)))
4281 if targetfilterrevs:
4282 if targetfilterrevs:
4282 msg = b'(%d revisions still filtered)\n'
4283 msg = b'(%d revisions still filtered)\n'
4283 ui.status(msg % len(targetfilterrevs))
4284 ui.status(msg % len(targetfilterrevs))
4284
4285
4285 try:
4286 try:
4286 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4287 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4287 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4288 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4288
4289
4289 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4290 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4290 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4291 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4291
4292
4292 # try to find an existing branchmap to reuse
4293 # try to find an existing branchmap to reuse
4293 subsettable = getbranchmapsubsettable()
4294 subsettable = getbranchmapsubsettable()
4294 candidatefilter = subsettable.get(None)
4295 candidatefilter = subsettable.get(None)
4295 while candidatefilter is not None:
4296 while candidatefilter is not None:
4296 candidatebm = repo.filtered(candidatefilter).branchmap()
4297 candidatebm = repo.filtered(candidatefilter).branchmap()
4297 if candidatebm.validfor(baserepo):
4298 if candidatebm.validfor(baserepo):
4298 filtered = repoview.filterrevs(repo, candidatefilter)
4299 filtered = repoview.filterrevs(repo, candidatefilter)
4299 missing = [r for r in allbaserevs if r in filtered]
4300 missing = [r for r in allbaserevs if r in filtered]
4300 base = candidatebm.copy()
4301 base = candidatebm.copy()
4301 base.update(baserepo, missing)
4302 base.update(baserepo, missing)
4302 break
4303 break
4303 candidatefilter = subsettable.get(candidatefilter)
4304 candidatefilter = subsettable.get(candidatefilter)
4304 else:
4305 else:
4305 # no suitable subset where found
4306 # no suitable subset where found
4306 base = branchmap.branchcache()
4307 base = branchmap.branchcache()
4307 base.update(baserepo, allbaserevs)
4308 base.update(baserepo, allbaserevs)
4308
4309
4309 def setup():
4310 def setup():
4310 x[0] = base.copy()
4311 x[0] = base.copy()
4311 if clearcaches:
4312 if clearcaches:
4312 unfi._revbranchcache = None
4313 unfi._revbranchcache = None
4313 clearchangelog(repo)
4314 clearchangelog(repo)
4314
4315
4315 def bench():
4316 def bench():
4316 x[0].update(targetrepo, newrevs)
4317 x[0].update(targetrepo, newrevs)
4317
4318
4318 timer(bench, setup=setup)
4319 timer(bench, setup=setup)
4319 fm.end()
4320 fm.end()
4320 finally:
4321 finally:
4321 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4322 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4322 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4323 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4323
4324
4324
4325
4325 @command(
4326 @command(
4326 b'perf::branchmapload|perfbranchmapload',
4327 b'perf::branchmapload|perfbranchmapload',
4327 [
4328 [
4328 (b'f', b'filter', b'', b'Specify repoview filter'),
4329 (b'f', b'filter', b'', b'Specify repoview filter'),
4329 (b'', b'list', False, b'List brachmap filter caches'),
4330 (b'', b'list', False, b'List brachmap filter caches'),
4330 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4331 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4331 ]
4332 ]
4332 + formatteropts,
4333 + formatteropts,
4333 )
4334 )
4334 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4335 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4335 """benchmark reading the branchmap"""
4336 """benchmark reading the branchmap"""
4336 opts = _byteskwargs(opts)
4337 opts = _byteskwargs(opts)
4337 clearrevlogs = opts[b'clear_revlogs']
4338 clearrevlogs = opts[b'clear_revlogs']
4338
4339
4339 if list:
4340 if list:
4340 for name, kind, st in repo.cachevfs.readdir(stat=True):
4341 for name, kind, st in repo.cachevfs.readdir(stat=True):
4341 if name.startswith(b'branch2'):
4342 if name.startswith(b'branch2'):
4342 filtername = name.partition(b'-')[2] or b'unfiltered'
4343 filtername = name.partition(b'-')[2] or b'unfiltered'
4343 ui.status(
4344 ui.status(
4344 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4345 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4345 )
4346 )
4346 return
4347 return
4347 if not filter:
4348 if not filter:
4348 filter = None
4349 filter = None
4349 subsettable = getbranchmapsubsettable()
4350 subsettable = getbranchmapsubsettable()
4350 if filter is None:
4351 if filter is None:
4351 repo = repo.unfiltered()
4352 repo = repo.unfiltered()
4352 else:
4353 else:
4353 repo = repoview.repoview(repo, filter)
4354 repo = repoview.repoview(repo, filter)
4354
4355
4355 repo.branchmap() # make sure we have a relevant, up to date branchmap
4356 repo.branchmap() # make sure we have a relevant, up to date branchmap
4356
4357
4357 try:
4358 try:
4358 fromfile = branchmap.branchcache.fromfile
4359 fromfile = branchmap.branchcache.fromfile
4359 except AttributeError:
4360 except AttributeError:
4360 # older versions
4361 # older versions
4361 fromfile = branchmap.read
4362 fromfile = branchmap.read
4362
4363
4363 currentfilter = filter
4364 currentfilter = filter
4364 # try once without timer, the filter may not be cached
4365 # try once without timer, the filter may not be cached
4365 while fromfile(repo) is None:
4366 while fromfile(repo) is None:
4366 currentfilter = subsettable.get(currentfilter)
4367 currentfilter = subsettable.get(currentfilter)
4367 if currentfilter is None:
4368 if currentfilter is None:
4368 raise error.Abort(
4369 raise error.Abort(
4369 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4370 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4370 )
4371 )
4371 repo = repo.filtered(currentfilter)
4372 repo = repo.filtered(currentfilter)
4372 timer, fm = gettimer(ui, opts)
4373 timer, fm = gettimer(ui, opts)
4373
4374
4374 def setup():
4375 def setup():
4375 if clearrevlogs:
4376 if clearrevlogs:
4376 clearchangelog(repo)
4377 clearchangelog(repo)
4377
4378
4378 def bench():
4379 def bench():
4379 fromfile(repo)
4380 fromfile(repo)
4380
4381
4381 timer(bench, setup=setup)
4382 timer(bench, setup=setup)
4382 fm.end()
4383 fm.end()
4383
4384
4384
4385
4385 @command(b'perf::loadmarkers|perfloadmarkers')
4386 @command(b'perf::loadmarkers|perfloadmarkers')
4386 def perfloadmarkers(ui, repo):
4387 def perfloadmarkers(ui, repo):
4387 """benchmark the time to parse the on-disk markers for a repo
4388 """benchmark the time to parse the on-disk markers for a repo
4388
4389
4389 Result is the number of markers in the repo."""
4390 Result is the number of markers in the repo."""
4390 timer, fm = gettimer(ui)
4391 timer, fm = gettimer(ui)
4391 svfs = getsvfs(repo)
4392 svfs = getsvfs(repo)
4392 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4393 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4393 fm.end()
4394 fm.end()
4394
4395
4395
4396
4396 @command(
4397 @command(
4397 b'perf::lrucachedict|perflrucachedict',
4398 b'perf::lrucachedict|perflrucachedict',
4398 formatteropts
4399 formatteropts
4399 + [
4400 + [
4400 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4401 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4401 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4402 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4402 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4403 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4403 (b'', b'size', 4, b'size of cache'),
4404 (b'', b'size', 4, b'size of cache'),
4404 (b'', b'gets', 10000, b'number of key lookups'),
4405 (b'', b'gets', 10000, b'number of key lookups'),
4405 (b'', b'sets', 10000, b'number of key sets'),
4406 (b'', b'sets', 10000, b'number of key sets'),
4406 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4407 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4407 (
4408 (
4408 b'',
4409 b'',
4409 b'mixedgetfreq',
4410 b'mixedgetfreq',
4410 50,
4411 50,
4411 b'frequency of get vs set ops in mixed mode',
4412 b'frequency of get vs set ops in mixed mode',
4412 ),
4413 ),
4413 ],
4414 ],
4414 norepo=True,
4415 norepo=True,
4415 )
4416 )
4416 def perflrucache(
4417 def perflrucache(
4417 ui,
4418 ui,
4418 mincost=0,
4419 mincost=0,
4419 maxcost=100,
4420 maxcost=100,
4420 costlimit=0,
4421 costlimit=0,
4421 size=4,
4422 size=4,
4422 gets=10000,
4423 gets=10000,
4423 sets=10000,
4424 sets=10000,
4424 mixed=10000,
4425 mixed=10000,
4425 mixedgetfreq=50,
4426 mixedgetfreq=50,
4426 **opts
4427 **opts
4427 ):
4428 ):
4428 opts = _byteskwargs(opts)
4429 opts = _byteskwargs(opts)
4429
4430
4430 def doinit():
4431 def doinit():
4431 for i in _xrange(10000):
4432 for i in _xrange(10000):
4432 util.lrucachedict(size)
4433 util.lrucachedict(size)
4433
4434
4434 costrange = list(range(mincost, maxcost + 1))
4435 costrange = list(range(mincost, maxcost + 1))
4435
4436
4436 values = []
4437 values = []
4437 for i in _xrange(size):
4438 for i in _xrange(size):
4438 values.append(random.randint(0, _maxint))
4439 values.append(random.randint(0, _maxint))
4439
4440
4440 # Get mode fills the cache and tests raw lookup performance with no
4441 # Get mode fills the cache and tests raw lookup performance with no
4441 # eviction.
4442 # eviction.
4442 getseq = []
4443 getseq = []
4443 for i in _xrange(gets):
4444 for i in _xrange(gets):
4444 getseq.append(random.choice(values))
4445 getseq.append(random.choice(values))
4445
4446
4446 def dogets():
4447 def dogets():
4447 d = util.lrucachedict(size)
4448 d = util.lrucachedict(size)
4448 for v in values:
4449 for v in values:
4449 d[v] = v
4450 d[v] = v
4450 for key in getseq:
4451 for key in getseq:
4451 value = d[key]
4452 value = d[key]
4452 value # silence pyflakes warning
4453 value # silence pyflakes warning
4453
4454
4454 def dogetscost():
4455 def dogetscost():
4455 d = util.lrucachedict(size, maxcost=costlimit)
4456 d = util.lrucachedict(size, maxcost=costlimit)
4456 for i, v in enumerate(values):
4457 for i, v in enumerate(values):
4457 d.insert(v, v, cost=costs[i])
4458 d.insert(v, v, cost=costs[i])
4458 for key in getseq:
4459 for key in getseq:
4459 try:
4460 try:
4460 value = d[key]
4461 value = d[key]
4461 value # silence pyflakes warning
4462 value # silence pyflakes warning
4462 except KeyError:
4463 except KeyError:
4463 pass
4464 pass
4464
4465
4465 # Set mode tests insertion speed with cache eviction.
4466 # Set mode tests insertion speed with cache eviction.
4466 setseq = []
4467 setseq = []
4467 costs = []
4468 costs = []
4468 for i in _xrange(sets):
4469 for i in _xrange(sets):
4469 setseq.append(random.randint(0, _maxint))
4470 setseq.append(random.randint(0, _maxint))
4470 costs.append(random.choice(costrange))
4471 costs.append(random.choice(costrange))
4471
4472
4472 def doinserts():
4473 def doinserts():
4473 d = util.lrucachedict(size)
4474 d = util.lrucachedict(size)
4474 for v in setseq:
4475 for v in setseq:
4475 d.insert(v, v)
4476 d.insert(v, v)
4476
4477
4477 def doinsertscost():
4478 def doinsertscost():
4478 d = util.lrucachedict(size, maxcost=costlimit)
4479 d = util.lrucachedict(size, maxcost=costlimit)
4479 for i, v in enumerate(setseq):
4480 for i, v in enumerate(setseq):
4480 d.insert(v, v, cost=costs[i])
4481 d.insert(v, v, cost=costs[i])
4481
4482
4482 def dosets():
4483 def dosets():
4483 d = util.lrucachedict(size)
4484 d = util.lrucachedict(size)
4484 for v in setseq:
4485 for v in setseq:
4485 d[v] = v
4486 d[v] = v
4486
4487
4487 # Mixed mode randomly performs gets and sets with eviction.
4488 # Mixed mode randomly performs gets and sets with eviction.
4488 mixedops = []
4489 mixedops = []
4489 for i in _xrange(mixed):
4490 for i in _xrange(mixed):
4490 r = random.randint(0, 100)
4491 r = random.randint(0, 100)
4491 if r < mixedgetfreq:
4492 if r < mixedgetfreq:
4492 op = 0
4493 op = 0
4493 else:
4494 else:
4494 op = 1
4495 op = 1
4495
4496
4496 mixedops.append(
4497 mixedops.append(
4497 (op, random.randint(0, size * 2), random.choice(costrange))
4498 (op, random.randint(0, size * 2), random.choice(costrange))
4498 )
4499 )
4499
4500
4500 def domixed():
4501 def domixed():
4501 d = util.lrucachedict(size)
4502 d = util.lrucachedict(size)
4502
4503
4503 for op, v, cost in mixedops:
4504 for op, v, cost in mixedops:
4504 if op == 0:
4505 if op == 0:
4505 try:
4506 try:
4506 d[v]
4507 d[v]
4507 except KeyError:
4508 except KeyError:
4508 pass
4509 pass
4509 else:
4510 else:
4510 d[v] = v
4511 d[v] = v
4511
4512
4512 def domixedcost():
4513 def domixedcost():
4513 d = util.lrucachedict(size, maxcost=costlimit)
4514 d = util.lrucachedict(size, maxcost=costlimit)
4514
4515
4515 for op, v, cost in mixedops:
4516 for op, v, cost in mixedops:
4516 if op == 0:
4517 if op == 0:
4517 try:
4518 try:
4518 d[v]
4519 d[v]
4519 except KeyError:
4520 except KeyError:
4520 pass
4521 pass
4521 else:
4522 else:
4522 d.insert(v, v, cost=cost)
4523 d.insert(v, v, cost=cost)
4523
4524
4524 benches = [
4525 benches = [
4525 (doinit, b'init'),
4526 (doinit, b'init'),
4526 ]
4527 ]
4527
4528
4528 if costlimit:
4529 if costlimit:
4529 benches.extend(
4530 benches.extend(
4530 [
4531 [
4531 (dogetscost, b'gets w/ cost limit'),
4532 (dogetscost, b'gets w/ cost limit'),
4532 (doinsertscost, b'inserts w/ cost limit'),
4533 (doinsertscost, b'inserts w/ cost limit'),
4533 (domixedcost, b'mixed w/ cost limit'),
4534 (domixedcost, b'mixed w/ cost limit'),
4534 ]
4535 ]
4535 )
4536 )
4536 else:
4537 else:
4537 benches.extend(
4538 benches.extend(
4538 [
4539 [
4539 (dogets, b'gets'),
4540 (dogets, b'gets'),
4540 (doinserts, b'inserts'),
4541 (doinserts, b'inserts'),
4541 (dosets, b'sets'),
4542 (dosets, b'sets'),
4542 (domixed, b'mixed'),
4543 (domixed, b'mixed'),
4543 ]
4544 ]
4544 )
4545 )
4545
4546
4546 for fn, title in benches:
4547 for fn, title in benches:
4547 timer, fm = gettimer(ui, opts)
4548 timer, fm = gettimer(ui, opts)
4548 timer(fn, title=title)
4549 timer(fn, title=title)
4549 fm.end()
4550 fm.end()
4550
4551
4551
4552
4552 @command(
4553 @command(
4553 b'perf::write|perfwrite',
4554 b'perf::write|perfwrite',
4554 formatteropts
4555 formatteropts
4555 + [
4556 + [
4556 (b'', b'write-method', b'write', b'ui write method'),
4557 (b'', b'write-method', b'write', b'ui write method'),
4557 (b'', b'nlines', 100, b'number of lines'),
4558 (b'', b'nlines', 100, b'number of lines'),
4558 (b'', b'nitems', 100, b'number of items (per line)'),
4559 (b'', b'nitems', 100, b'number of items (per line)'),
4559 (b'', b'item', b'x', b'item that is written'),
4560 (b'', b'item', b'x', b'item that is written'),
4560 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4561 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4561 (b'', b'flush-line', None, b'flush after each line'),
4562 (b'', b'flush-line', None, b'flush after each line'),
4562 ],
4563 ],
4563 )
4564 )
4564 def perfwrite(ui, repo, **opts):
4565 def perfwrite(ui, repo, **opts):
4565 """microbenchmark ui.write (and others)"""
4566 """microbenchmark ui.write (and others)"""
4566 opts = _byteskwargs(opts)
4567 opts = _byteskwargs(opts)
4567
4568
4568 write = getattr(ui, _sysstr(opts[b'write_method']))
4569 write = getattr(ui, _sysstr(opts[b'write_method']))
4569 nlines = int(opts[b'nlines'])
4570 nlines = int(opts[b'nlines'])
4570 nitems = int(opts[b'nitems'])
4571 nitems = int(opts[b'nitems'])
4571 item = opts[b'item']
4572 item = opts[b'item']
4572 batch_line = opts.get(b'batch_line')
4573 batch_line = opts.get(b'batch_line')
4573 flush_line = opts.get(b'flush_line')
4574 flush_line = opts.get(b'flush_line')
4574
4575
4575 if batch_line:
4576 if batch_line:
4576 line = item * nitems + b'\n'
4577 line = item * nitems + b'\n'
4577
4578
4578 def benchmark():
4579 def benchmark():
4579 for i in pycompat.xrange(nlines):
4580 for i in pycompat.xrange(nlines):
4580 if batch_line:
4581 if batch_line:
4581 write(line)
4582 write(line)
4582 else:
4583 else:
4583 for i in pycompat.xrange(nitems):
4584 for i in pycompat.xrange(nitems):
4584 write(item)
4585 write(item)
4585 write(b'\n')
4586 write(b'\n')
4586 if flush_line:
4587 if flush_line:
4587 ui.flush()
4588 ui.flush()
4588 ui.flush()
4589 ui.flush()
4589
4590
4590 timer, fm = gettimer(ui, opts)
4591 timer, fm = gettimer(ui, opts)
4591 timer(benchmark)
4592 timer(benchmark)
4592 fm.end()
4593 fm.end()
4593
4594
4594
4595
4595 def uisetup(ui):
4596 def uisetup(ui):
4596 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4597 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4597 commands, b'debugrevlogopts'
4598 commands, b'debugrevlogopts'
4598 ):
4599 ):
4599 # for "historical portability":
4600 # for "historical portability":
4600 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4601 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4601 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4602 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4602 # openrevlog() should cause failure, because it has been
4603 # openrevlog() should cause failure, because it has been
4603 # available since 3.5 (or 49c583ca48c4).
4604 # available since 3.5 (or 49c583ca48c4).
4604 def openrevlog(orig, repo, cmd, file_, opts):
4605 def openrevlog(orig, repo, cmd, file_, opts):
4605 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4606 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4606 raise error.Abort(
4607 raise error.Abort(
4607 b"This version doesn't support --dir option",
4608 b"This version doesn't support --dir option",
4608 hint=b"use 3.5 or later",
4609 hint=b"use 3.5 or later",
4609 )
4610 )
4610 return orig(repo, cmd, file_, opts)
4611 return orig(repo, cmd, file_, opts)
4611
4612
4612 name = _sysstr(b'openrevlog')
4613 name = _sysstr(b'openrevlog')
4613 extensions.wrapfunction(cmdutil, name, openrevlog)
4614 extensions.wrapfunction(cmdutil, name, openrevlog)
4614
4615
4615
4616
4616 @command(
4617 @command(
4617 b'perf::progress|perfprogress',
4618 b'perf::progress|perfprogress',
4618 formatteropts
4619 formatteropts
4619 + [
4620 + [
4620 (b'', b'topic', b'topic', b'topic for progress messages'),
4621 (b'', b'topic', b'topic', b'topic for progress messages'),
4621 (b'c', b'total', 1000000, b'total value we are progressing to'),
4622 (b'c', b'total', 1000000, b'total value we are progressing to'),
4622 ],
4623 ],
4623 norepo=True,
4624 norepo=True,
4624 )
4625 )
4625 def perfprogress(ui, topic=None, total=None, **opts):
4626 def perfprogress(ui, topic=None, total=None, **opts):
4626 """printing of progress bars"""
4627 """printing of progress bars"""
4627 opts = _byteskwargs(opts)
4628 opts = _byteskwargs(opts)
4628
4629
4629 timer, fm = gettimer(ui, opts)
4630 timer, fm = gettimer(ui, opts)
4630
4631
4631 def doprogress():
4632 def doprogress():
4632 with ui.makeprogress(topic, total=total) as progress:
4633 with ui.makeprogress(topic, total=total) as progress:
4633 for i in _xrange(total):
4634 for i in _xrange(total):
4634 progress.increment()
4635 progress.increment()
4635
4636
4636 timer(doprogress)
4637 timer(doprogress)
4637 fm.end()
4638 fm.end()
@@ -1,483 +1,484 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perf::addremove
81 perf::addremove
82 (no help text available)
82 (no help text available)
83 perf::ancestors
83 perf::ancestors
84 (no help text available)
84 (no help text available)
85 perf::ancestorset
85 perf::ancestorset
86 (no help text available)
86 (no help text available)
87 perf::annotate
87 perf::annotate
88 (no help text available)
88 (no help text available)
89 perf::bdiff benchmark a bdiff between revisions
89 perf::bdiff benchmark a bdiff between revisions
90 perf::bookmarks
90 perf::bookmarks
91 benchmark parsing bookmarks from disk to memory
91 benchmark parsing bookmarks from disk to memory
92 perf::branchmap
92 perf::branchmap
93 benchmark the update of a branchmap
93 benchmark the update of a branchmap
94 perf::branchmapload
94 perf::branchmapload
95 benchmark reading the branchmap
95 benchmark reading the branchmap
96 perf::branchmapupdate
96 perf::branchmapupdate
97 benchmark branchmap update from for <base> revs to <target>
97 benchmark branchmap update from for <base> revs to <target>
98 revs
98 revs
99 perf::bundle benchmark the creation of a bundle from a repository
99 perf::bundle benchmark the creation of a bundle from a repository
100 perf::bundleread
100 perf::bundleread
101 Benchmark reading of bundle files.
101 Benchmark reading of bundle files.
102 perf::cca (no help text available)
102 perf::cca (no help text available)
103 perf::changegroupchangelog
103 perf::changegroupchangelog
104 Benchmark producing a changelog group for a changegroup.
104 Benchmark producing a changelog group for a changegroup.
105 perf::changeset
105 perf::changeset
106 (no help text available)
106 (no help text available)
107 perf::ctxfiles
107 perf::ctxfiles
108 (no help text available)
108 (no help text available)
109 perf::delta-find
109 perf::delta-find
110 benchmark the process of finding a valid delta for a revlog
110 benchmark the process of finding a valid delta for a revlog
111 revision
111 revision
112 perf::diffwd Profile diff of working directory changes
112 perf::diffwd Profile diff of working directory changes
113 perf::dirfoldmap
113 perf::dirfoldmap
114 benchmap a 'dirstate._map.dirfoldmap.get()' request
114 benchmap a 'dirstate._map.dirfoldmap.get()' request
115 perf::dirs (no help text available)
115 perf::dirs (no help text available)
116 perf::dirstate
116 perf::dirstate
117 benchmap the time of various distate operations
117 benchmap the time of various distate operations
118 perf::dirstatedirs
118 perf::dirstatedirs
119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
120 perf::dirstatefoldmap
120 perf::dirstatefoldmap
121 benchmap a 'dirstate._map.filefoldmap.get()' request
121 benchmap a 'dirstate._map.filefoldmap.get()' request
122 perf::dirstatewrite
122 perf::dirstatewrite
123 benchmap the time it take to write a dirstate on disk
123 benchmap the time it take to write a dirstate on disk
124 perf::discovery
124 perf::discovery
125 benchmark discovery between local repo and the peer at given
125 benchmark discovery between local repo and the peer at given
126 path
126 path
127 perf::fncacheencode
127 perf::fncacheencode
128 (no help text available)
128 (no help text available)
129 perf::fncacheload
129 perf::fncacheload
130 (no help text available)
130 (no help text available)
131 perf::fncachewrite
131 perf::fncachewrite
132 (no help text available)
132 (no help text available)
133 perf::heads benchmark the computation of a changelog heads
133 perf::heads benchmark the computation of a changelog heads
134 perf::helper-mergecopies
134 perf::helper-mergecopies
135 find statistics about potential parameters for
135 find statistics about potential parameters for
136 'perfmergecopies'
136 'perfmergecopies'
137 perf::helper-pathcopies
137 perf::helper-pathcopies
138 find statistic about potential parameters for the
138 find statistic about potential parameters for the
139 'perftracecopies'
139 'perftracecopies'
140 perf::ignore benchmark operation related to computing ignore
140 perf::ignore benchmark operation related to computing ignore
141 perf::index benchmark index creation time followed by a lookup
141 perf::index benchmark index creation time followed by a lookup
142 perf::linelogedits
142 perf::linelogedits
143 (no help text available)
143 (no help text available)
144 perf::loadmarkers
144 perf::loadmarkers
145 benchmark the time to parse the on-disk markers for a repo
145 benchmark the time to parse the on-disk markers for a repo
146 perf::log (no help text available)
146 perf::log (no help text available)
147 perf::lookup (no help text available)
147 perf::lookup (no help text available)
148 perf::lrucachedict
148 perf::lrucachedict
149 (no help text available)
149 (no help text available)
150 perf::manifest
150 perf::manifest
151 benchmark the time to read a manifest from disk and return a
151 benchmark the time to read a manifest from disk and return a
152 usable
152 usable
153 perf::mergecalculate
153 perf::mergecalculate
154 (no help text available)
154 (no help text available)
155 perf::mergecopies
155 perf::mergecopies
156 measure runtime of 'copies.mergecopies'
156 measure runtime of 'copies.mergecopies'
157 perf::moonwalk
157 perf::moonwalk
158 benchmark walking the changelog backwards
158 benchmark walking the changelog backwards
159 perf::nodelookup
159 perf::nodelookup
160 (no help text available)
160 (no help text available)
161 perf::nodemap
161 perf::nodemap
162 benchmark the time necessary to look up revision from a cold
162 benchmark the time necessary to look up revision from a cold
163 nodemap
163 nodemap
164 perf::parents
164 perf::parents
165 benchmark the time necessary to fetch one changeset's parents.
165 benchmark the time necessary to fetch one changeset's parents.
166 perf::pathcopies
166 perf::pathcopies
167 benchmark the copy tracing logic
167 benchmark the copy tracing logic
168 perf::phases benchmark phasesets computation
168 perf::phases benchmark phasesets computation
169 perf::phasesremote
169 perf::phasesremote
170 benchmark time needed to analyse phases of the remote server
170 benchmark time needed to analyse phases of the remote server
171 perf::progress
171 perf::progress
172 printing of progress bars
172 printing of progress bars
173 perf::rawfiles
173 perf::rawfiles
174 (no help text available)
174 (no help text available)
175 perf::revlogchunks
175 perf::revlogchunks
176 Benchmark operations on revlog chunks.
176 Benchmark operations on revlog chunks.
177 perf::revlogindex
177 perf::revlogindex
178 Benchmark operations against a revlog index.
178 Benchmark operations against a revlog index.
179 perf::revlogrevision
179 perf::revlogrevision
180 Benchmark obtaining a revlog revision.
180 Benchmark obtaining a revlog revision.
181 perf::revlogrevisions
181 perf::revlogrevisions
182 Benchmark reading a series of revisions from a revlog.
182 Benchmark reading a series of revisions from a revlog.
183 perf::revlogwrite
183 perf::revlogwrite
184 Benchmark writing a series of revisions to a revlog.
184 Benchmark writing a series of revisions to a revlog.
185 perf::revrange
185 perf::revrange
186 (no help text available)
186 (no help text available)
187 perf::revset benchmark the execution time of a revset
187 perf::revset benchmark the execution time of a revset
188 perf::startup
188 perf::startup
189 (no help text available)
189 (no help text available)
190 perf::status benchmark the performance of a single status call
190 perf::status benchmark the performance of a single status call
191 perf::stream-consume
191 perf::stream-consume
192 benchmark the full application of a stream clone
192 benchmark the full application of a stream clone
193 perf::stream-generate
193 perf::stream-generate
194 benchmark the full generation of a stream clone
194 benchmark the full generation of a stream clone
195 perf::stream-locked-section
195 perf::stream-locked-section
196 benchmark the initial, repo-locked, section of a stream-clone
196 benchmark the initial, repo-locked, section of a stream-clone
197 perf::tags Benchmark tags retrieval in various situation
197 perf::tags Benchmark tags retrieval in various situation
198 perf::templating
198 perf::templating
199 test the rendering time of a given template
199 test the rendering time of a given template
200 perf::unbundle
200 perf::unbundle
201 benchmark application of a bundle in a repository.
201 benchmark application of a bundle in a repository.
202 perf::unidiff
202 perf::unidiff
203 benchmark a unified diff between revisions
203 benchmark a unified diff between revisions
204 perf::volatilesets
204 perf::volatilesets
205 benchmark the computation of various volatile set
205 benchmark the computation of various volatile set
206 perf::walk (no help text available)
206 perf::walk (no help text available)
207 perf::write microbenchmark ui.write (and others)
207 perf::write microbenchmark ui.write (and others)
208
208
209 (use 'hg help -v perf' to show built-in aliases and global options)
209 (use 'hg help -v perf' to show built-in aliases and global options)
210
210
211 $ hg help perfaddremove
211 $ hg help perfaddremove
212 hg perf::addremove
212 hg perf::addremove
213
213
214 aliases: perfaddremove
214 aliases: perfaddremove
215
215
216 (no help text available)
216 (no help text available)
217
217
218 options:
218 options:
219
219
220 -T --template TEMPLATE display with template
220 -T --template TEMPLATE display with template
221
221
222 (some details hidden, use --verbose to show complete help)
222 (some details hidden, use --verbose to show complete help)
223
223
224 $ hg perfaddremove
224 $ hg perfaddremove
225 $ hg perfancestors
225 $ hg perfancestors
226 $ hg perfancestorset 2
226 $ hg perfancestorset 2
227 $ hg perfannotate a
227 $ hg perfannotate a
228 $ hg perfbdiff -c 1
228 $ hg perfbdiff -c 1
229 $ hg perfbdiff --alldata 1
229 $ hg perfbdiff --alldata 1
230 $ hg perfunidiff -c 1
230 $ hg perfunidiff -c 1
231 $ hg perfunidiff --alldata 1
231 $ hg perfunidiff --alldata 1
232 $ hg perfbookmarks
232 $ hg perfbookmarks
233 $ hg perfbranchmap
233 $ hg perfbranchmap
234 $ hg perfbranchmapload
234 $ hg perfbranchmapload
235 $ hg perfbranchmapupdate --base "not tip" --target "tip"
235 $ hg perfbranchmapupdate --base "not tip" --target "tip"
236 benchmark of branchmap with 3 revisions with 1 new ones
236 benchmark of branchmap with 3 revisions with 1 new ones
237 $ hg perfcca
237 $ hg perfcca
238 $ hg perfchangegroupchangelog
238 $ hg perfchangegroupchangelog
239 $ hg perfchangegroupchangelog --cgversion 01
239 $ hg perfchangegroupchangelog --cgversion 01
240 $ hg perfchangeset 2
240 $ hg perfchangeset 2
241 $ hg perfctxfiles 2
241 $ hg perfctxfiles 2
242 $ hg perfdiffwd
242 $ hg perfdiffwd
243 $ hg perfdirfoldmap
243 $ hg perfdirfoldmap
244 $ hg perfdirs
244 $ hg perfdirs
245 $ hg perfdirstate
245 $ hg perfdirstate
246 $ hg perfdirstate --contains
246 $ hg perfdirstate --contains
247 $ hg perfdirstate --iteration
247 $ hg perfdirstate --iteration
248 $ hg perfdirstatedirs
248 $ hg perfdirstatedirs
249 $ hg perfdirstatefoldmap
249 $ hg perfdirstatefoldmap
250 $ hg perfdirstatewrite
250 $ hg perfdirstatewrite
251 #if repofncache
251 #if repofncache
252 $ hg perffncacheencode
252 $ hg perffncacheencode
253 $ hg perffncacheload
253 $ hg perffncacheload
254 $ hg debugrebuildfncache
254 $ hg debugrebuildfncache
255 fncache already up to date
255 fncache already up to date
256 $ hg perffncachewrite
256 $ hg perffncachewrite
257 $ hg debugrebuildfncache
257 $ hg debugrebuildfncache
258 fncache already up to date
258 fncache already up to date
259 #endif
259 #endif
260 $ hg perfheads
260 $ hg perfheads
261 $ hg perfignore
261 $ hg perfignore
262 $ hg perfindex
262 $ hg perfindex
263 $ hg perflinelogedits -n 1
263 $ hg perflinelogedits -n 1
264 $ hg perfloadmarkers
264 $ hg perfloadmarkers
265 $ hg perflog
265 $ hg perflog
266 $ hg perflookup 2
266 $ hg perflookup 2
267 $ hg perflrucache
267 $ hg perflrucache
268 $ hg perfmanifest 2
268 $ hg perfmanifest 2
269 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
269 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
270 $ hg perfmanifest -m 44fe2c8352bb
270 $ hg perfmanifest -m 44fe2c8352bb
271 abort: manifest revision must be integer or full node
271 abort: manifest revision must be integer or full node
272 [255]
272 [255]
273 $ hg perfmergecalculate -r 3
273 $ hg perfmergecalculate -r 3
274 $ hg perfmoonwalk
274 $ hg perfmoonwalk
275 $ hg perfnodelookup 2
275 $ hg perfnodelookup 2
276 $ hg perfpathcopies 1 2
276 $ hg perfpathcopies 1 2
277 $ hg perfprogress --total 1000
277 $ hg perfprogress --total 1000
278 $ hg perfrawfiles 2
278 $ hg perfrawfiles 2
279 $ hg perfrevlogindex -c
279 $ hg perfrevlogindex -c
280 #if reporevlogstore
280 #if reporevlogstore
281 $ hg perfrevlogrevisions .hg/store/data/a.i
281 $ hg perfrevlogrevisions .hg/store/data/a.i
282 #endif
282 #endif
283 $ hg perfrevlogrevision -m 0
283 $ hg perfrevlogrevision -m 0
284 $ hg perfrevlogchunks -c
284 $ hg perfrevlogchunks -c
285 $ hg perfrevrange
285 $ hg perfrevrange
286 $ hg perfrevset 'all()'
286 $ hg perfrevset 'all()'
287 $ hg perfstartup
287 $ hg perfstartup
288 $ hg perfstatus
288 $ hg perfstatus
289 $ hg perfstatus --dirstate
289 $ hg perfstatus --dirstate
290 $ hg perftags
290 $ hg perftags
291 $ hg perftemplating
291 $ hg perftemplating
292 $ hg perfvolatilesets
292 $ hg perfvolatilesets
293 $ hg perfwalk
293 $ hg perfwalk
294 $ hg perfparents
294 $ hg perfparents
295 $ hg perfdiscovery -q .
295 $ hg perfdiscovery -q .
296 $ hg perf::phases
296
297
297 Test run control
298 Test run control
298 ----------------
299 ----------------
299
300
300 Simple single entry
301 Simple single entry
301
302
302 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
303 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
303 ! wall * comb * user * sys * (best of 15) (glob)
304 ! wall * comb * user * sys * (best of 15) (glob)
304 ! wall * comb * user * sys * (max of 15) (glob)
305 ! wall * comb * user * sys * (max of 15) (glob)
305 ! wall * comb * user * sys * (avg of 15) (glob)
306 ! wall * comb * user * sys * (avg of 15) (glob)
306 ! wall * comb * user * sys * (median of 15) (glob)
307 ! wall * comb * user * sys * (median of 15) (glob)
307
308
308 Multiple entries
309 Multiple entries
309
310
310 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
311 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
311 ! wall * comb * user * sys * (best of 50) (glob)
312 ! wall * comb * user * sys * (best of 50) (glob)
312 ! wall * comb * user * sys * (max of 50) (glob)
313 ! wall * comb * user * sys * (max of 50) (glob)
313 ! wall * comb * user * sys * (avg of 50) (glob)
314 ! wall * comb * user * sys * (avg of 50) (glob)
314 ! wall * comb * user * sys * (median of 50) (glob)
315 ! wall * comb * user * sys * (median of 50) (glob)
315
316
316 error case are ignored
317 error case are ignored
317
318
318 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
319 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
319 malformatted run limit entry, missing "-": 500
320 malformatted run limit entry, missing "-": 500
320 ! wall * comb * user * sys * (best of 50) (glob)
321 ! wall * comb * user * sys * (best of 50) (glob)
321 ! wall * comb * user * sys * (max of 50) (glob)
322 ! wall * comb * user * sys * (max of 50) (glob)
322 ! wall * comb * user * sys * (avg of 50) (glob)
323 ! wall * comb * user * sys * (avg of 50) (glob)
323 ! wall * comb * user * sys * (median of 50) (glob)
324 ! wall * comb * user * sys * (median of 50) (glob)
324 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
325 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
325 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
326 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
326 ! wall * comb * user * sys * (best of 50) (glob)
327 ! wall * comb * user * sys * (best of 50) (glob)
327 ! wall * comb * user * sys * (max of 50) (glob)
328 ! wall * comb * user * sys * (max of 50) (glob)
328 ! wall * comb * user * sys * (avg of 50) (glob)
329 ! wall * comb * user * sys * (avg of 50) (glob)
329 ! wall * comb * user * sys * (median of 50) (glob)
330 ! wall * comb * user * sys * (median of 50) (glob)
330 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
331 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
331 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
332 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
332 ! wall * comb * user * sys * (best of 50) (glob)
333 ! wall * comb * user * sys * (best of 50) (glob)
333 ! wall * comb * user * sys * (max of 50) (glob)
334 ! wall * comb * user * sys * (max of 50) (glob)
334 ! wall * comb * user * sys * (avg of 50) (glob)
335 ! wall * comb * user * sys * (avg of 50) (glob)
335 ! wall * comb * user * sys * (median of 50) (glob)
336 ! wall * comb * user * sys * (median of 50) (glob)
336
337
337 test actual output
338 test actual output
338 ------------------
339 ------------------
339
340
340 normal output:
341 normal output:
341
342
342 $ hg perfheads --config perf.stub=no
343 $ hg perfheads --config perf.stub=no
343 ! wall * comb * user * sys * (best of *) (glob)
344 ! wall * comb * user * sys * (best of *) (glob)
344 ! wall * comb * user * sys * (max of *) (glob)
345 ! wall * comb * user * sys * (max of *) (glob)
345 ! wall * comb * user * sys * (avg of *) (glob)
346 ! wall * comb * user * sys * (avg of *) (glob)
346 ! wall * comb * user * sys * (median of *) (glob)
347 ! wall * comb * user * sys * (median of *) (glob)
347
348
348 detailed output:
349 detailed output:
349
350
350 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
351 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
351 ! wall * comb * user * sys * (best of *) (glob)
352 ! wall * comb * user * sys * (best of *) (glob)
352 ! wall * comb * user * sys * (max of *) (glob)
353 ! wall * comb * user * sys * (max of *) (glob)
353 ! wall * comb * user * sys * (avg of *) (glob)
354 ! wall * comb * user * sys * (avg of *) (glob)
354 ! wall * comb * user * sys * (median of *) (glob)
355 ! wall * comb * user * sys * (median of *) (glob)
355
356
356 test json output
357 test json output
357 ----------------
358 ----------------
358
359
359 normal output:
360 normal output:
360
361
361 $ hg perfheads --template json --config perf.stub=no
362 $ hg perfheads --template json --config perf.stub=no
362 [
363 [
363 {
364 {
364 "avg.comb": *, (glob)
365 "avg.comb": *, (glob)
365 "avg.count": *, (glob)
366 "avg.count": *, (glob)
366 "avg.sys": *, (glob)
367 "avg.sys": *, (glob)
367 "avg.user": *, (glob)
368 "avg.user": *, (glob)
368 "avg.wall": *, (glob)
369 "avg.wall": *, (glob)
369 "comb": *, (glob)
370 "comb": *, (glob)
370 "count": *, (glob)
371 "count": *, (glob)
371 "max.comb": *, (glob)
372 "max.comb": *, (glob)
372 "max.count": *, (glob)
373 "max.count": *, (glob)
373 "max.sys": *, (glob)
374 "max.sys": *, (glob)
374 "max.user": *, (glob)
375 "max.user": *, (glob)
375 "max.wall": *, (glob)
376 "max.wall": *, (glob)
376 "median.comb": *, (glob)
377 "median.comb": *, (glob)
377 "median.count": *, (glob)
378 "median.count": *, (glob)
378 "median.sys": *, (glob)
379 "median.sys": *, (glob)
379 "median.user": *, (glob)
380 "median.user": *, (glob)
380 "median.wall": *, (glob)
381 "median.wall": *, (glob)
381 "sys": *, (glob)
382 "sys": *, (glob)
382 "user": *, (glob)
383 "user": *, (glob)
383 "wall": * (glob)
384 "wall": * (glob)
384 }
385 }
385 ]
386 ]
386
387
387 detailed output:
388 detailed output:
388
389
389 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
390 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
390 [
391 [
391 {
392 {
392 "avg.comb": *, (glob)
393 "avg.comb": *, (glob)
393 "avg.count": *, (glob)
394 "avg.count": *, (glob)
394 "avg.sys": *, (glob)
395 "avg.sys": *, (glob)
395 "avg.user": *, (glob)
396 "avg.user": *, (glob)
396 "avg.wall": *, (glob)
397 "avg.wall": *, (glob)
397 "comb": *, (glob)
398 "comb": *, (glob)
398 "count": *, (glob)
399 "count": *, (glob)
399 "max.comb": *, (glob)
400 "max.comb": *, (glob)
400 "max.count": *, (glob)
401 "max.count": *, (glob)
401 "max.sys": *, (glob)
402 "max.sys": *, (glob)
402 "max.user": *, (glob)
403 "max.user": *, (glob)
403 "max.wall": *, (glob)
404 "max.wall": *, (glob)
404 "median.comb": *, (glob)
405 "median.comb": *, (glob)
405 "median.count": *, (glob)
406 "median.count": *, (glob)
406 "median.sys": *, (glob)
407 "median.sys": *, (glob)
407 "median.user": *, (glob)
408 "median.user": *, (glob)
408 "median.wall": *, (glob)
409 "median.wall": *, (glob)
409 "sys": *, (glob)
410 "sys": *, (glob)
410 "user": *, (glob)
411 "user": *, (glob)
411 "wall": * (glob)
412 "wall": * (glob)
412 }
413 }
413 ]
414 ]
414
415
415 Test pre-run feature
416 Test pre-run feature
416 --------------------
417 --------------------
417
418
418 (perf discovery has some spurious output)
419 (perf discovery has some spurious output)
419
420
420 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
421 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
421 ! wall * comb * user * sys * (best of 1) (glob)
422 ! wall * comb * user * sys * (best of 1) (glob)
422 ! wall * comb * user * sys * (max of 1) (glob)
423 ! wall * comb * user * sys * (max of 1) (glob)
423 ! wall * comb * user * sys * (avg of 1) (glob)
424 ! wall * comb * user * sys * (avg of 1) (glob)
424 ! wall * comb * user * sys * (median of 1) (glob)
425 ! wall * comb * user * sys * (median of 1) (glob)
425 searching for changes
426 searching for changes
426 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
427 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
427 ! wall * comb * user * sys * (best of 1) (glob)
428 ! wall * comb * user * sys * (best of 1) (glob)
428 ! wall * comb * user * sys * (max of 1) (glob)
429 ! wall * comb * user * sys * (max of 1) (glob)
429 ! wall * comb * user * sys * (avg of 1) (glob)
430 ! wall * comb * user * sys * (avg of 1) (glob)
430 ! wall * comb * user * sys * (median of 1) (glob)
431 ! wall * comb * user * sys * (median of 1) (glob)
431 searching for changes
432 searching for changes
432 searching for changes
433 searching for changes
433 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
434 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
434 ! wall * comb * user * sys * (best of 1) (glob)
435 ! wall * comb * user * sys * (best of 1) (glob)
435 ! wall * comb * user * sys * (max of 1) (glob)
436 ! wall * comb * user * sys * (max of 1) (glob)
436 ! wall * comb * user * sys * (avg of 1) (glob)
437 ! wall * comb * user * sys * (avg of 1) (glob)
437 ! wall * comb * user * sys * (median of 1) (glob)
438 ! wall * comb * user * sys * (median of 1) (glob)
438 searching for changes
439 searching for changes
439 searching for changes
440 searching for changes
440 searching for changes
441 searching for changes
441 searching for changes
442 searching for changes
442 $ hg perf::bundle 'last(all(), 5)'
443 $ hg perf::bundle 'last(all(), 5)'
443 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
444 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
444 4 changesets found
445 4 changesets found
445 $ hg perf::unbundle last-5.hg
446 $ hg perf::unbundle last-5.hg
446
447
447
448
448 test profile-benchmark option
449 test profile-benchmark option
449 ------------------------------
450 ------------------------------
450
451
451 Function to check that statprof ran
452 Function to check that statprof ran
452 $ statprofran () {
453 $ statprofran () {
453 > grep -E 'Sample count:|No samples recorded' > /dev/null
454 > grep -E 'Sample count:|No samples recorded' > /dev/null
454 > }
455 > }
455 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
456 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
456
457
457 Check perf.py for historical portability
458 Check perf.py for historical portability
458 ----------------------------------------
459 ----------------------------------------
459
460
460 $ cd "$TESTDIR/.."
461 $ cd "$TESTDIR/.."
461
462
462 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
463 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
463 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
464 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
464 > "$TESTDIR"/check-perf-code.py contrib/perf.py
465 > "$TESTDIR"/check-perf-code.py contrib/perf.py
465 contrib/perf.py:\d+: (re)
466 contrib/perf.py:\d+: (re)
466 > from mercurial import (
467 > from mercurial import (
467 import newer module separately in try clause for early Mercurial
468 import newer module separately in try clause for early Mercurial
468 contrib/perf.py:\d+: (re)
469 contrib/perf.py:\d+: (re)
469 > from mercurial import (
470 > from mercurial import (
470 import newer module separately in try clause for early Mercurial
471 import newer module separately in try clause for early Mercurial
471 contrib/perf.py:\d+: (re)
472 contrib/perf.py:\d+: (re)
472 > origindexpath = orig.opener.join(indexfile)
473 > origindexpath = orig.opener.join(indexfile)
473 use getvfs()/getsvfs() for early Mercurial
474 use getvfs()/getsvfs() for early Mercurial
474 contrib/perf.py:\d+: (re)
475 contrib/perf.py:\d+: (re)
475 > origdatapath = orig.opener.join(datafile)
476 > origdatapath = orig.opener.join(datafile)
476 use getvfs()/getsvfs() for early Mercurial
477 use getvfs()/getsvfs() for early Mercurial
477 contrib/perf.py:\d+: (re)
478 contrib/perf.py:\d+: (re)
478 > vfs = vfsmod.vfs(tmpdir)
479 > vfs = vfsmod.vfs(tmpdir)
479 use getvfs()/getsvfs() for early Mercurial
480 use getvfs()/getsvfs() for early Mercurial
480 contrib/perf.py:\d+: (re)
481 contrib/perf.py:\d+: (re)
481 > vfs.options = getattr(orig.opener, 'options', None)
482 > vfs.options = getattr(orig.opener, 'options', None)
482 use getvfs()/getsvfs() for early Mercurial
483 use getvfs()/getsvfs() for early Mercurial
483 [1]
484 [1]
General Comments 0
You need to be logged in to leave comments. Login now