##// END OF EJS Templates
perf: create the temporary target next to the source in stream-consume...
marmoute -
r52453:8e8776a2 default
parent child Browse files
Show More
@@ -1,4681 +1,4689 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return the argument unchanged.

    Used as a no-op stand-in for pycompat conversion helpers that are
    missing on old Mercurial versions.
    """
    result = a
    return result
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
def safehasattr(thing, attr):
    """Portable hasattr(): True when *thing* defines *attr*.

    *attr* is a bytes name (converted via _sysstr); a private sentinel
    distinguishes "attribute missing" from any real attribute value.
    """
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238
238
239 # for "historical portability":
239 # for "historical portability":
240 # define parsealiases locally, because cmdutil.parsealiases has been
240 # define parsealiases locally, because cmdutil.parsealiases has been
241 # available since 1.5 (or 6252852b4332)
241 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the alias list from a b'name|alias1|alias2' command spec.

    Defined locally because cmdutil.parsealiases has only been available
    since Mercurial 1.5 (or 6252852b4332).
    """
    return list(cmd.split(b"|"))
244
244
245
245
246 if safehasattr(registrar, 'command'):
246 if safehasattr(registrar, 'command'):
247 command = registrar.command(cmdtable)
247 command = registrar.command(cmdtable)
248 elif safehasattr(cmdutil, 'command'):
248 elif safehasattr(cmdutil, 'command'):
249 command = cmdutil.command(cmdtable)
249 command = cmdutil.command(cmdtable)
250 if 'norepo' not in getargspec(command).args:
250 if 'norepo' not in getargspec(command).args:
251 # for "historical portability":
251 # for "historical portability":
252 # wrap original cmdutil.command, because "norepo" option has
252 # wrap original cmdutil.command, because "norepo" option has
253 # been available since 3.1 (or 75a96326cecb)
253 # been available since 3.1 (or 75a96326cecb)
254 _command = command
254 _command = command
255
255
256 def command(name, options=(), synopsis=None, norepo=False):
256 def command(name, options=(), synopsis=None, norepo=False):
257 if norepo:
257 if norepo:
258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 return _command(name, list(options), synopsis)
259 return _command(name, list(options), synopsis)
260
260
261
261
262 else:
262 else:
263 # for "historical portability":
263 # for "historical portability":
264 # define "@command" annotation locally, because cmdutil.command
264 # define "@command" annotation locally, because cmdutil.command
265 # has been available since 1.9 (or 2daa5179e73f)
265 # has been available since 1.9 (or 2daa5179e73f)
266 def command(name, options=(), synopsis=None, norepo=False):
266 def command(name, options=(), synopsis=None, norepo=False):
267 def decorator(func):
267 def decorator(func):
268 if synopsis:
268 if synopsis:
269 cmdtable[name] = func, list(options), synopsis
269 cmdtable[name] = func, list(options), synopsis
270 else:
270 else:
271 cmdtable[name] = func, list(options)
271 cmdtable[name] = func, list(options)
272 if norepo:
272 if norepo:
273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 return func
274 return func
275
275
276 return decorator
276 return decorator
277
277
278
278
279 try:
279 try:
280 import mercurial.registrar
280 import mercurial.registrar
281 import mercurial.configitems
281 import mercurial.configitems
282
282
283 configtable = {}
283 configtable = {}
284 configitem = mercurial.registrar.configitem(configtable)
284 configitem = mercurial.registrar.configitem(configtable)
285 configitem(
285 configitem(
286 b'perf',
286 b'perf',
287 b'presleep',
287 b'presleep',
288 default=mercurial.configitems.dynamicdefault,
288 default=mercurial.configitems.dynamicdefault,
289 experimental=True,
289 experimental=True,
290 )
290 )
291 configitem(
291 configitem(
292 b'perf',
292 b'perf',
293 b'stub',
293 b'stub',
294 default=mercurial.configitems.dynamicdefault,
294 default=mercurial.configitems.dynamicdefault,
295 experimental=True,
295 experimental=True,
296 )
296 )
297 configitem(
297 configitem(
298 b'perf',
298 b'perf',
299 b'parentscount',
299 b'parentscount',
300 default=mercurial.configitems.dynamicdefault,
300 default=mercurial.configitems.dynamicdefault,
301 experimental=True,
301 experimental=True,
302 )
302 )
303 configitem(
303 configitem(
304 b'perf',
304 b'perf',
305 b'all-timing',
305 b'all-timing',
306 default=mercurial.configitems.dynamicdefault,
306 default=mercurial.configitems.dynamicdefault,
307 experimental=True,
307 experimental=True,
308 )
308 )
309 configitem(
309 configitem(
310 b'perf',
310 b'perf',
311 b'pre-run',
311 b'pre-run',
312 default=mercurial.configitems.dynamicdefault,
312 default=mercurial.configitems.dynamicdefault,
313 )
313 )
314 configitem(
314 configitem(
315 b'perf',
315 b'perf',
316 b'profile-benchmark',
316 b'profile-benchmark',
317 default=mercurial.configitems.dynamicdefault,
317 default=mercurial.configitems.dynamicdefault,
318 )
318 )
319 configitem(
319 configitem(
320 b'perf',
320 b'perf',
321 b'run-limits',
321 b'run-limits',
322 default=mercurial.configitems.dynamicdefault,
322 default=mercurial.configitems.dynamicdefault,
323 experimental=True,
323 experimental=True,
324 )
324 )
325 except (ImportError, AttributeError):
325 except (ImportError, AttributeError):
326 pass
326 pass
327 except TypeError:
327 except TypeError:
328 # compatibility fix for a11fd395e83f
328 # compatibility fix for a11fd395e83f
329 # hg version: 5.2
329 # hg version: 5.2
330 configitem(
330 configitem(
331 b'perf',
331 b'perf',
332 b'presleep',
332 b'presleep',
333 default=mercurial.configitems.dynamicdefault,
333 default=mercurial.configitems.dynamicdefault,
334 )
334 )
335 configitem(
335 configitem(
336 b'perf',
336 b'perf',
337 b'stub',
337 b'stub',
338 default=mercurial.configitems.dynamicdefault,
338 default=mercurial.configitems.dynamicdefault,
339 )
339 )
340 configitem(
340 configitem(
341 b'perf',
341 b'perf',
342 b'parentscount',
342 b'parentscount',
343 default=mercurial.configitems.dynamicdefault,
343 default=mercurial.configitems.dynamicdefault,
344 )
344 )
345 configitem(
345 configitem(
346 b'perf',
346 b'perf',
347 b'all-timing',
347 b'all-timing',
348 default=mercurial.configitems.dynamicdefault,
348 default=mercurial.configitems.dynamicdefault,
349 )
349 )
350 configitem(
350 configitem(
351 b'perf',
351 b'perf',
352 b'pre-run',
352 b'pre-run',
353 default=mercurial.configitems.dynamicdefault,
353 default=mercurial.configitems.dynamicdefault,
354 )
354 )
355 configitem(
355 configitem(
356 b'perf',
356 b'perf',
357 b'profile-benchmark',
357 b'profile-benchmark',
358 default=mercurial.configitems.dynamicdefault,
358 default=mercurial.configitems.dynamicdefault,
359 )
359 )
360 configitem(
360 configitem(
361 b'perf',
361 b'perf',
362 b'run-limits',
362 b'run-limits',
363 default=mercurial.configitems.dynamicdefault,
363 default=mercurial.configitems.dynamicdefault,
364 )
364 )
365
365
366
366
def getlen(ui):
    """Return a length function for benchmark result sets.

    When the experimental perf.stub config is set, return a function that
    always reports 1 so stubbed benchmark runs stay cheap; otherwise
    return the builtin len().
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if stubbed:
        return lambda _x: 1
    return len
371
371
372
372
class noop:
    """Context manager that does nothing on entry or exit.

    Used as a stand-in wherever a real context manager (e.g. a profiler)
    is optional.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
381
381
382
382
383 NOOPCTX = noop()
383 NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.

    The returned timer is _timer (or stub_timer under perf.stub) partially
    applied with the formatter and the timing configuration read from
    these experimental config options: perf.presleep, perf.stub,
    perf.all-timing, perf.run-limits, perf.profile-benchmark and
    perf.pre-run."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # always falsy: callers use truthiness to detect a "real"
                # (structured-output) formatter
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    # Each entry is a b'<seconds>-<minruns>' pair; malformed entries are
    # warned about and skipped, and DEFAULTLIMITS is used if none parse.
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only available when the profiling module imported successfully
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, after the optional *setup*, without timing.

    Substituted for _timer when perf.stub is set so each benchmark only
    executes a single pass.  *fm* and *title* are accepted for interface
    parity with _timer but are not used here.
    """
    steps = [func] if setup is None else [setup, func]
    for step in steps:
        step()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) timing sample.

    On exit, one tuple is appended to the yielded list: wall-clock time
    from util.timer() and user/system CPU deltas from os.times(),
    measured across the managed block.
    """
    samples = []
    os_before = os.times()
    clock_before = util.timer()
    yield samples
    clock_after = util.timer()
    os_after = os.times()
    wall = clock_after - clock_before
    user_delta = os_after[0] - os_before[0]
    sys_delta = os_after[1] - os_before[1]
    samples.append((wall, user_delta, sys_delta))
526
526
527
527
528 # list of stop condition (elapsed time, minimal run count)
528 # list of stop condition (elapsed time, minimal run count)
529 DEFAULTLIMITS = (
529 DEFAULTLIMITS = (
530 (3.0, 100),
530 (3.0, 100),
531 (10.0, 3),
531 (10.0, 3),
532 )
532 )
533
533
534
534
@contextlib.contextmanager
def noop_context():
    """Context manager that sets up and tears down nothing.

    Default value for the ``context`` parameter of _timer.
    """
    yield None
538
538
539
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* and report timing statistics through *fm*.

    ``setup`` (if given) runs before every iteration, outside the timed
    region; each call of ``func`` happens inside a fresh ``context()``
    manager.  ``prerun`` warm-up iterations execute first without being
    measured.  ``limits`` is a sequence of (elapsed-seconds, min-runs)
    pairs: iteration stops at the first pair whose time AND count
    thresholds are both reached.  Only the first measured iteration runs
    under ``profiler`` (if any).  The value returned by the last
    ``func()`` call is passed to formatone() as the displayed result.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up passes: executed but neither timed nor profiled
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # only the very first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
582
582
583
583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing statistics through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  The best sample is always shown; with *displayall*, the max,
    average and median samples are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        # the "best" line uses bare field names; other roles get a prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        emit(b'avg', tuple(sum(column) / count for column in zip(*timings)))
        emit(b'median', timings[len(timings) // 2])
616
616
617
617
618 # utilities for historical portability
618 # utilities for historical portability
619
619
620
620
def getint(ui, section, name, default):
    """Read config option ``section.name`` as an integer.

    Returns ``default`` when the option is unset; aborts with a
    ConfigError when the stored value does not parse as an integer.
    Kept local for "historical portability": ui.configint only became
    available in 1.9 (or fa2b596db182).
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    attr = _sysstr(name)
    saved = getattr(obj, attr)

    class attrutil:
        # handle to overwrite the attribute or put the captured value back
        def set(self, newvalue):
            setattr(obj, attr, newvalue)

        def restore(self):
            setattr(obj, attr, saved)

    return attrutil()
671
671
672
672
673 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
674
674
675
675
def getbranchmapsubsettable():
    """Locate the branch-cache ``subsettable`` mapping for this Mercurial.

    The table moved between modules over time:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    store_vfs = getattr(repo, 'svfs', None)
    if not store_vfs:
        # fall back to the pre-2.3 attribute name (raises if absent)
        return getattr(repo, 'sopener')
    return store_vfs
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    working_vfs = getattr(repo, 'vfs', None)
    if not working_vfs:
        # fall back to the pre-2.3 attribute name (raises if absent)
        return getattr(repo, 'opener')
    return working_vfs
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    # hg >= 2.0 keeps the parsed tags in repo._tagscache
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # hg 1.4 .. 2.0 cached tags on repo._tags
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    # hg 0.6 .. 1.4 cached tags on repo.tagscache
    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
748 # utilities to clear cache
748 # utilities to clear cache
749
749
750
750
def clearfilecache(obj, attrname):
    """Drop ``attrname`` from ``obj``'s live attributes and its filecache.

    When ``obj`` exposes ``unfiltered()`` (filtered repos do), the
    operation is applied to the unfiltered view instead.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    target = obj if unfiltered is None else obj.unfiltered()
    if attrname in vars(target):
        delattr(target, attrname)
    target._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    """Invalidate the in-memory changelog cache of ``repo``."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # also reset the filtered-view level cache slots
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
765
765
766
766
767 # perf commands
767 # perf commands
768
768
769
769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk (tracked + unknown files, ignored excluded)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk_once():
        files = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(files))

    timer(walk_once)
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating file ``f`` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def annotate_once():
        return len(fctx.annotate(True))

    timer(annotate_once)
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the bare dirstate.status() call instead of repo.status()
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # fold the result so it is actually consumed
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer dirstate API: run inside the running_status context and
            # invalidate afterwards so no benchmark state is left behind
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the previous quiet level *before* entering the try block: the
    # original code assigned it as the first statement inside the try, so a
    # failure on that very read would have made the finally clause raise
    # UnboundLocalError and mask the real exception.
    oldquiet = repo.ui.quiet
    repo.ui.quiet = True
    try:
        matcher = scmutil.match(repo[None])
        # never mutate the repository: this is a pure benchmark
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer scmutil.addremove takes an explicit ui path formatter
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    """Drop revlog lookup caches, consistently across internal API changes."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2: reset the node lookup cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start every timed run from cold lookup caches
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
883
883
884
884
def _default_clear_on_disk_tags_cache(repo):
    """Fallback helper removing the on-disk tags cache file."""
    from mercurial import tags

    cache_path = tags._filename(repo)
    repo.cachevfs.tryunlink(cache_path)
889
889
890
890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Fallback helper removing the on-disk tags fnodes cache file."""
    from mercurial import tags

    fnodes_cache_path = tags._fnodescachefile
    repo.cachevfs.tryunlink(fnodes_cache_path)
895
895
896
896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache

    Each pruned record is overwritten with 0xff filler bytes.
    """
    from mercurial import tags

    missing_short = b'\xff' * 4
    missing_long = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    recsize = tags._fnodesrecsize
    for rev in revs:
        cache._writeentry(rev * recsize, missing_short, missing_long)
    cache.write()
908
908
909
909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # pick the in-tree implementations when this Mercurial provides them,
    # otherwise fall back to the local _default_* compatibility helpers
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        # build an extra-filtered view that hides the last N revisions and
        # warm its tags cache; that cache file stands in for the pre-update
        # state restored by the setup function below
        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # setup run before each timed run: put the caches back into the
        # requested (cold or pre-update) state
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        # the timed operation: a full tags computation
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1028
1028
1029
1029
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time a full walk over the ancestors of every changelog head
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def walk_ancestors():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(walk_ancestors)
    fm.end()
1042
1042
1043
1043
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET revisions against the ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def check_membership():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(check_membership)
    fm.end()
1058
1058
1059
1059
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with a single positional argument the revision applies to -c/-m;
    # with two, the first names the file whose revlog is used
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)  # the revision arrives as a command line string

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # gather everything needed to replay the delta search for this revision
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None  # no pre-computed delta is provided to the search
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        # keep the revlog data file open while searching for the delta
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1123
1123
1124
1124
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is (re)filled by the setup function with a fresh peer
    repos = [repo, None]
    # NOTE(review): unlike most perf commands, ``opts`` is not passed
    # through _byteskwargs() here, so its keys stay str — confirm whether
    # gettimer()/hg.peer() are expected to cope with that.
    timer, fm = gettimer(ui, opts)

    # resolve the path with whichever urlutil API this Mercurial provides
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            # older variant returns a (path, branch) tuple; keep the path
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # connect a fresh peer before each timed run
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1151
1151
1152
1152
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # make every run re-read the bookmarks (and optionally the revlogs)
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def load():
        repo._bookmarks

    timer(load, setup=setup)
    fm.end()
1177
1177
1178
1178
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        # compatibility with versions predating the bundlecaches module
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed wording: was "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        # derive the changegroup version from the bundle format version
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we time bundle generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1282
1282
1283
1283
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # Each make* helper returns a zero-argument closure suitable for timer();
    # the bundle is re-opened on every run so each run starts from scratch.
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the unpacked bundle stream in `size`-byte chunks until EOF
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file read baseline: no bundle decoding at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        # seek each part to its end; exercises the seekable-part machinery
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # read every bundle2 part in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines apply to every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open the bundle once to sniff its type and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1408
1408
1409
1409
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    changelog = repo.changelog
    revset = rev or b'all()'
    nodes = [changelog.lookup(r) for r in repo.revs(revset)]
    bundler = changegroup.getbundler(cgversion, repo)

    def generate():
        # build and fully exhaust the changelog chunk stream
        state, chunks = bundler._generatechangelog(changelog, nodes)
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(generate)

    fm.end()
1445
1445
1446
1446
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate `_dirs` directory map

    The cached map is deleted (when present) inside the timed function so
    every run rebuilds it from scratch.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its initial parsing is excluded from the timing
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            # drop the cache so the next run recomputes it
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate implementations have no such cache attribute
            pass

    timer(d)
    fm.end()
1463
1463
1464
1464
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so its initial parsing happens outside the timing
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            # time a full iteration over all tracked files
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            # time membership tests for both present and absent paths
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            # drop the in-memory dirstate so each run reloads it from disk
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1527
1527
1528
1528
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so its initial parsing happens outside the timing
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map before each run
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            # some dirstate implementations have no such cache attribute
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1547
1547
1548
1548
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the map so its initial construction is excluded from the timing
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached property so each run rebuilds the fold map
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1568
1568
1569
1569
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the map so its initial construction is excluded from the timing
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the fold map and the underlying directory map so each
        # run rebuilds them from scratch
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate implementations have no such cache attribute
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1593
1593
1594
1594
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so its initial parsing happens outside the timing
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually writes on each run
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # hold the working-copy lock for the duration of the benchmark
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1612
1612
1613
1613
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1635
1635
1636
1636
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the computation of the merge actions (`calculateupdates`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1668
1668
1669
1669
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    local, other, base = _getmergerevs(repo, opts)

    def run_mergecopies():
        # one full copy-tracing computation for the requested merge
        copies.mergecopies(repo, local, other, base)

    timer(run_mergecopies)
    fm.end()
1692
1692
1693
1693
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    source_ctx = scmutil.revsingle(repo, rev1, rev1)
    dest_ctx = scmutil.revsingle(repo, rev2, rev2)

    def trace_copies():
        # compute the rename/copy mapping between the two revisions
        copies.pathcopies(source_ctx, dest_ctx)

    timer(trace_copies)
    fm.end()
1707
1707
1708
1708
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ]
    + formatteropts,
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def d():
        phases = _phases
        if full:
            # with --full, also drop the on-disk cache object so the phase
            # roots file is re-read on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        # invalidate the computed sets, then force a recomputation by
        # querying the phase of the tip revision
        phases.invalidate()
        phases.phase(repo, tip_rev)

    timer(d)
    fm.end()
1735
1735
1736
1736
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    if util.safehasattr(path, 'main_path'):
        # modern path API: resolve the push variant explicitly
        path = path.get_push_variant()
        dest = path.loc
    else:
        # older path API: fall back to pushloc/loc attributes
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # prefer the modern index API when present, otherwise use the nodemap
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE: iteritems() — presumably kept for Python 2 compatibility of this
    # extension; confirm before modernizing
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1799
1799
1800
1800
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # interpret REV as a changeset and take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # Attribute names must be native str on Python 3: the
                # previous b'getstorage' bytes literal made this check
                # blow up with TypeError instead of selecting a branch.
                if util.safehasattr(repo.manifestlog, 'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial exposed the revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop caches first so every run measures a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1844
1844
1845
1845
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # Benchmark reading a single changelog entry, resolved once up front.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1858
1858
1859
1859
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset():
        # drop the in-memory dirstate and the cached ignore matcher so the
        # timed run re-parses the ignore configuration from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load_ignore():
        # touching the property forces the ignore matcher to be rebuilt
        dirstate._ignore

    timer(load_ignore, setup=reset, title=b"load")
    fm.end()
1876
1876
1877
1877
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # After _byteskwargs() every key is bytes: the previous str key
        # (opts['rev']) raised KeyError whenever --no-lookup was passed.
        # The Abort message is also bytes now, matching the file convention.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # the timed body: rebuild the changelog (index creation) then look
        # every requested node up
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1940
1940
1941
1941
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    # resolve the requested revisions to binary nodes once, outside the timer
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # newer indexes expose get_rev(); fall back to the legacy nodemap dict
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        # the timed body: one nodemap lookup per requested node
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild the changelog (and thus a cold nodemap) before every run

        def setup():
            setnodeget()

    else:
        # build once and prewarm, so all runs hit a warm nodemap
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
2012
2012
2013
2013
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # Benchmark spawning `hg version -q` with configuration neutralized
    # (empty HGRCPATH) and output discarded.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name == 'nt':
            # no /dev/null on Windows; neutralize config via the environment
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])
        else:
            template = b"HGRCPATH= %s version -q > /dev/null"
            os.system(template % fsencode(sys.argv[0]))

    timer(run)
    fm.end()
2030
2030
2031
2031
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    Returns a one-argument callable ``generate(repo)`` producing the stream
    data, or aborts with the list of versions supported by the installed
    Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            entries, bytes, data = generatev1(repo, None, None, True)
            return data

        # Register the wrapper, not the raw generatev1 function: the raw
        # function has a different signature and return type, so storing it
        # directly (as the previous code did) broke any caller resolving to
        # the b'v1' entry.
        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            return generatev3(repo, None, None, True)

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2078
2078
2079
2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # Hold on to the produced generator between runs: dropping it may
    # trigger cleanup work that must stay outside the measurement.
    keepalive = [None]

    def reset():
        keepalive[0] = None

    generate = _find_stream_generator(stream_version)

    def scan():
        # the repo lock is held for the duration of this initialisation
        keepalive[0] = generate(repo)

    timer(scan, setup=reset, title=b"load")
    fm.end()
2114
2114
2115
2115
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fixed help-text typo ("to us" -> "to use"), matching the
            # identical option on perf::stream-locked-section
            b'stream version to use ("v1", "v2", "v3-exp" '
            b'or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    generate = _find_stream_generator(stream_version)

    def drain():
        # iterate the entire stream; the repo lock is held during the
        # initialisation part of the generation
        for _chunk in generate(repo):
            pass

    timer(drain, title=b"generate")
    fm.end()
2147
2147
2148
2148
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This includes the creation of the repository
    """
    # Import the required mercurial modules one at a time so that an old
    # installation missing any of them aborts with a precise message.
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # run_variables[0] is the open bundle file, run_variables[1] the target
    # directory; both are handed from the context manager to each run.
    run_variables = [None, None]

    # we create the new repository next to the other one for two reasons:
    # - this way we use the same file system, which are relevant for benchmark
    # - if /tmp/ is small, the operation could overfills it.
    source_repo_dir = os.path.dirname(repo.root)

    @contextlib.contextmanager
    def context():
        # NOTE(review): prefix/dir are bytes here — assumes repo.root is a
        # bytes path so tempfile yields a bytes directory name; confirm.
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory(
                prefix=b'hg-perf-stream-consume-',
                dir=source_repo_dir,
            ) as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]

        # we actually wants to copy all config to ensure the repo config is
        # taken in account during the benchmark
        new_ui = repo.ui.__class__(repo.ui)
        # only pass ui when no srcrepo
        localrepo.createrepository(
            new_ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(new_ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1 bundles expose `apply`; everything else goes through the
        # generic bundle2 application path inside a transaction
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2230
2238
2231
2239
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount caps how many commits we walk
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
2257
2265
2258
2266
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # Benchmark computing the file list of one changectx.
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
2270
2278
2271
2279
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # Benchmark reading the raw changelog entry and taking field 3
    # (presumably the files list, per the command name — verify).
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def run():
        len(changelog.read(rev)[3])

    timer(run)
    fm.end()
2284
2292
2285
2293
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # Benchmark resolving a revision specifier through repo.lookup().
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
2292
2300
2293
2301
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    livelines = 0
    edit_args = []
    for rev in _xrange(edits):
        # Pick a random source range and a random replacement size.  The
        # order of the randint calls is part of the benchmark definition
        # (fixed seed) and must not change.
        a1 = randint(0, livelines)
        a2 = randint(a1, min(livelines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        livelines += (b2 - b1) - (a2 - a1)
        edit_args.append((rev, a1, a2, b1, b2))

    def apply_all():
        ll = linelog.linelog()
        for args in edit_args:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(apply_all)
    fm.end()
2331
2339
2332
2340
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # Benchmark resolving one or more revset specifications.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind once so the timed call skips the module attribute lookup
    resolve = scmutil.revrange

    def run():
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
2340
2348
2341
2349
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # Benchmark a cold node->rev lookup on a standalone changelog revlog,
    # clearing the revlog caches after each run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        # the modern revlog constructor takes `radix`; older versions expect
        # `indexfile`, hence the TypeError fallback
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog caches so the next run is cold again
        clearcaches(cl)

    timer(d)
    fm.end()
2362
2370
2363
2371
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, with its output captured and discarded"""
    opts = _byteskwargs(opts)
    rev = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # swallow the command output so terminal I/O does not skew the timing
    ui.pushbuffer()

    def d():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(d)
    ui.popbuffer()
    fm.end()
2381
2389
2382
2390
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # walk from tip down to revision 0 (`stop` is exclusive)
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
2399
2407
2400
2408
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template

    Renders the template for every requested revision through a ui whose
    output is redirected to os.devnull, so only template evaluation (not
    terminal I/O) is measured.
    """
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # fix: the os.devnull handle used to be leaked
        nullui.fout.close()
2443
2451
2444
2452
2445 def _displaystats(ui, opts, entries, data):
2453 def _displaystats(ui, opts, entries, data):
2446 # use a second formatter because the data are quite different, not sure
2454 # use a second formatter because the data are quite different, not sure
2447 # how it flies with the templater.
2455 # how it flies with the templater.
2448 fm = ui.formatter(b'perf-stats', opts)
2456 fm = ui.formatter(b'perf-stats', opts)
2449 for key, title in entries:
2457 for key, title in entries:
2450 values = data[key]
2458 values = data[key]
2451 nbvalues = len(data)
2459 nbvalues = len(data)
2452 values.sort()
2460 values.sort()
2453 stats = {
2461 stats = {
2454 'key': key,
2462 'key': key,
2455 'title': title,
2463 'title': title,
2456 'nbitems': len(values),
2464 'nbitems': len(values),
2457 'min': values[0][0],
2465 'min': values[0][0],
2458 '10%': values[(nbvalues * 10) // 100][0],
2466 '10%': values[(nbvalues * 10) // 100][0],
2459 '25%': values[(nbvalues * 25) // 100][0],
2467 '25%': values[(nbvalues * 25) // 100][0],
2460 '50%': values[(nbvalues * 50) // 100][0],
2468 '50%': values[(nbvalues * 50) // 100][0],
2461 '75%': values[(nbvalues * 75) // 100][0],
2469 '75%': values[(nbvalues * 75) // 100][0],
2462 '80%': values[(nbvalues * 80) // 100][0],
2470 '80%': values[(nbvalues * 80) // 100][0],
2463 '85%': values[(nbvalues * 85) // 100][0],
2471 '85%': values[(nbvalues * 85) // 100][0],
2464 '90%': values[(nbvalues * 90) // 100][0],
2472 '90%': values[(nbvalues * 90) // 100][0],
2465 '95%': values[(nbvalues * 95) // 100][0],
2473 '95%': values[(nbvalues * 95) // 100][0],
2466 '99%': values[(nbvalues * 99) // 100][0],
2474 '99%': values[(nbvalues * 99) // 100][0],
2467 'max': values[-1][0],
2475 'max': values[-1][0],
2468 }
2476 }
2469 fm.startitem()
2477 fm.startitem()
2470 fm.data(**stats)
2478 fm.data(**stats)
2471 # make node pretty for the human output
2479 # make node pretty for the human output
2472 fm.plain('### %s (%d items)\n' % (title, len(values)))
2480 fm.plain('### %s (%d items)\n' % (title, len(values)))
2473 lines = [
2481 lines = [
2474 'min',
2482 'min',
2475 '10%',
2483 '10%',
2476 '25%',
2484 '25%',
2477 '50%',
2485 '50%',
2478 '75%',
2486 '75%',
2479 '80%',
2487 '80%',
2480 '85%',
2488 '85%',
2481 '90%',
2489 '90%',
2482 '95%',
2490 '95%',
2483 '99%',
2491 '99%',
2484 'max',
2492 'max',
2485 ]
2493 ]
2486 for l in lines:
2494 for l in lines:
2487 fm.plain('%s: %s\n' % (l, stats[l]))
2495 fm.plain('%s: %s\n' % (l, stats[l]))
2488 fm.end()
2496 fm.end()
2489
2497
2490
2498
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style format keyed into the per-triplet `data`)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing/rename columns are only filled when --timing is set
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # revisions of interest: merges within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        # each common-ancestor head yields one (base, p1, p2) triplet
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): keys are created as bytes (b'p1.nbrevs') but
            # looked up below with str ('p1.nbrevs'); on Python 3 those are
            # distinct keys — confirm whether the --stats path still works.
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2672
2680
2673
2681
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing two extra columns (rename count, elapsed time) are shown
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # revisions of interest: merges within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # each (ancestor-head, parent) pair is one source->destination sample
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): keys are created as bytes (b'nbrevs') but
                # looked up below with str ('nbrevs'); on Python 3 those are
                # distinct keys — confirm whether this path still works.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2812
2820
2813
2821
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2820
2828
2821
2829
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2833
2841
2834
2842
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    The cache is flagged dirty before every write so the full file is
    rewritten on each run; the original content is backed up through the
    transaction.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # fix: release the transaction and the store lock even when the timed
    # function raises (they used to leak on error)
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()
    fm.end()
2853
2861
2854
2862
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def d():
        # bind the method once; the loop body is the measured hot path
        encode = store.encode
        for path in store.fncache.entries:
            encode(path)

    timer(d)
    fm.end()
2868
2876
2869
2877
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for threaded bdiff benchmarking.
    #
    # q: queue of (text1, text2) pairs, with None used as an end-of-batch
    #    sentinel; blocks/xdiff: booleans selecting which diff routine to
    #    benchmark; ready: condition the workers park on between timing runs;
    #    done: event signalling shutdown.
    while not done.is_set():
        pair = q.get()
        # drain the current batch until the None sentinel is seen
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        # wait for the coordinator to start the next run (or set `done`)
        with ready:
            ready.wait()
2885
2893
2886
2894
def _manifestrevision(repo, mnode):
    """return the raw manifest revision text for manifest node `mnode`"""
    ml = repo.manifestlog
    # newer Mercurial exposes per-tree storage; older versions a bare revlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2896
2904
2897
2905
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # a revlog was selected by flag: the first positional arg is the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    pairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            pairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            f1 = fctx.revision(change[0][0] or -1)
            f2 = fctx.revision(change[1][0] or -1)
            pairs.append((f1, f2))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in pairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        # prime the queue with one sentinel per worker so every thread
        # parks on the ready condition before measurement starts
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed the work, then wake the parked workers and wait
            for pair in pairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker threads down cleanly
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3012
3020
3013
3021
@command(
    b'perf::unbundle',
    [
        (b'', b'as-push', None, b'pretend the bundle comes from a push'),
    ]
    + formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The --as-push option make the unbundle operation appears like it comes from
    a client push. It change some aspect of the processing and associated
    performance profile.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    unbundle_source = b'perf::unbundle'
    if opts[b'as_push']:
        unbundle_source = b'push'

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left by the previous run and
                        # re-read the bundle from the start of the file
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=unbundle_source,
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # Fix: this previously read `repo.ui.quiet == orig_quiet`,
                # a no-op comparison; the intent is to restore the quiet
                # level that was saved before the benchmark.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3105
3113
3106
3114
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # a revlog was selected by flag: the first positional arg is the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            pairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            f1 = fctx.revision(change[0][0] or -1)
            f2 = fctx.revision(change[1][0] or -1)
            pairs.append((f1, f2))

    def d():
        for old, new in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                old, b'', new, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3185
3193
3186
3194
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # map of single-letter flags to the diff keyword argument they enable
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagmap[c]: b'1' for c in diffopt}

        def d(kwargs=diffkwargs):
            # buffer the output so only the timing is reported
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + diffopt) if diffopt else b'none')
        timer(d, title=title)
    fm.end()
3210
3218
3211
3219
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort(b'unsupported revlog version: %d' % version)
    inline = header & (1 << 16)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8 exposed parsing through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog length
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # parsing is deliberately part of the measured work
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3357
3365
3358
3366
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative start revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -1 * step

        for revnum in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(revnum)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3407
3415
3408
3416
3409 @command(
3417 @command(
3410 b'perf::revlogwrite|perfrevlogwrite',
3418 b'perf::revlogwrite|perfrevlogwrite',
3411 revlogopts
3419 revlogopts
3412 + formatteropts
3420 + formatteropts
3413 + [
3421 + [
3414 (b's', b'startrev', 1000, b'revision to start writing at'),
3422 (b's', b'startrev', 1000, b'revision to start writing at'),
3415 (b'', b'stoprev', -1, b'last revision to write'),
3423 (b'', b'stoprev', -1, b'last revision to write'),
3416 (b'', b'count', 3, b'number of passes to perform'),
3424 (b'', b'count', 3, b'number of passes to perform'),
3417 (b'', b'details', False, b'print timing for every revisions tested'),
3425 (b'', b'details', False, b'print timing for every revisions tested'),
3418 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3426 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3419 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3427 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3420 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3428 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3421 ],
3429 ],
3422 b'-c|-m|FILE',
3430 b'-c|-m|FILE',
3423 )
3431 )
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # typo fix: the message used to read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this entry used `resultcount * 70 // 100`, reporting the
        # 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3550
3558
3551
3559
3552 class _faketr:
3560 class _faketr:
3553 def add(s, x, y, z=None):
3561 def add(s, x, y, z=None):
3554 return None
3562 return None
3555
3563
3556
3564
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of ``orig`` into a temporary revlog, timing each add.

    Returns a list of ``(rev, timing)`` pairs, one per replayed revision.
    """
    results = []
    faketr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # configure delta-base laziness on both modern and legacy revlogs
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        targetrevs = list(orig.revs(startrev, stoprev))
        total = len(targetrevs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            updateprogress = progress.update
            completeprogress = progress.complete
        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(targetrevs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, faketr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as timed:
                dest.addrawrevision(*addargs, **addkwargs)
            results.append((rev, timed[0]))
        updateprogress(total)
        completeprogress()
    return results
3609
3617
3610
3618
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for ``addrawrevision``.

    Depending on ``source``, the revision payload is provided either as a
    full text or as a cached delta against a chosen base revision.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            # prefer p2 only when strictly smaller (ties keep p1)
            if len(otherdiff) < len(diff):
                parent = p2
                diff = otherdiff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3651
3659
3652
3660
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of ``orig`` truncated before ``truncaterev``.

    The index and data files are copied into a throw-away directory and
    truncated so that revisions >= ``truncaterev`` can be re-added. The
    directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # The index entry size lives on the index object in modern revlogs;
        # fall back to the legacy `_io` attribute. Same dual lookup as used
        # by `perfrevlogrevision` elsewhere in this file.
        try:
            iosize = orig.index.entry_size
        except AttributeError:
            iosize = orig._io.size
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * iosize)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern API takes a radix
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older API takes explicit file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3713
3721
3714
3722
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                # NOTE(review): elsewhere this function uses
                # `util.compengines`; confirm `compressionengines` is the
                # intended attribute here.
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # BUG FIX: both lookups previously used the legacy 'datafile'
            # name, so the modern '_datafile' attribute was never consulted
            # (and the eagerly-evaluated getattr default raised on revlogs
            # without 'datafile').
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3890
3898
3891
3899
3892 @command(
3900 @command(
3893 b'perf::revlogrevision|perfrevlogrevision',
3901 b'perf::revlogrevision|perfrevlogrevision',
3894 revlogopts
3902 revlogopts
3895 + formatteropts
3903 + formatteropts
3896 + [(b'', b'cache', False, b'use caches instead of clearing')],
3904 + [(b'', b'cache', False, b'use caches instead of clearing')],
3897 b'-c|-m|FILE REV',
3905 b'-c|-m|FILE REV',
3898 )
3906 )
3899 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3907 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3900 """Benchmark obtaining a revlog revision.
3908 """Benchmark obtaining a revlog revision.
3901
3909
3902 Obtaining a revlog revision consists of roughly the following steps:
3910 Obtaining a revlog revision consists of roughly the following steps:
3903
3911
3904 1. Compute the delta chain
3912 1. Compute the delta chain
3905 2. Slice the delta chain if applicable
3913 2. Slice the delta chain if applicable
3906 3. Obtain the raw chunks for that delta chain
3914 3. Obtain the raw chunks for that delta chain
3907 4. Decompress each raw chunk
3915 4. Decompress each raw chunk
3908 5. Apply binary patches to obtain fulltext
3916 5. Apply binary patches to obtain fulltext
3909 6. Verify hash of fulltext
3917 6. Verify hash of fulltext
3910
3918
3911 This command measures the time spent in each of these phases.
3919 This command measures the time spent in each of these phases.
3912 """
3920 """
3913 opts = _byteskwargs(opts)
3921 opts = _byteskwargs(opts)
3914
3922
3915 if opts.get(b'changelog') or opts.get(b'manifest'):
3923 if opts.get(b'changelog') or opts.get(b'manifest'):
3916 file_, rev = None, file_
3924 file_, rev = None, file_
3917 elif rev is None:
3925 elif rev is None:
3918 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3926 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3919
3927
3920 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3928 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3921
3929
3922 # _chunkraw was renamed to _getsegmentforrevs.
3930 # _chunkraw was renamed to _getsegmentforrevs.
3923 try:
3931 try:
3924 segmentforrevs = r._inner.get_segment_for_revs
3932 segmentforrevs = r._inner.get_segment_for_revs
3925 except AttributeError:
3933 except AttributeError:
3926 try:
3934 try:
3927 segmentforrevs = r._getsegmentforrevs
3935 segmentforrevs = r._getsegmentforrevs
3928 except AttributeError:
3936 except AttributeError:
3929 segmentforrevs = r._chunkraw
3937 segmentforrevs = r._chunkraw
3930
3938
3931 node = r.lookup(rev)
3939 node = r.lookup(rev)
3932 rev = r.rev(node)
3940 rev = r.rev(node)
3933
3941
3934 if getattr(r, 'reading', None) is not None:
3942 if getattr(r, 'reading', None) is not None:
3935
3943
3936 @contextlib.contextmanager
3944 @contextlib.contextmanager
3937 def lazy_reading(r):
3945 def lazy_reading(r):
3938 with r.reading():
3946 with r.reading():
3939 yield
3947 yield
3940
3948
3941 else:
3949 else:
3942
3950
3943 @contextlib.contextmanager
3951 @contextlib.contextmanager
3944 def lazy_reading(r):
3952 def lazy_reading(r):
3945 yield
3953 yield
3946
3954
3947 def getrawchunks(data, chain):
3955 def getrawchunks(data, chain):
3948 start = r.start
3956 start = r.start
3949 length = r.length
3957 length = r.length
3950 inline = r._inline
3958 inline = r._inline
3951 try:
3959 try:
3952 iosize = r.index.entry_size
3960 iosize = r.index.entry_size
3953 except AttributeError:
3961 except AttributeError:
3954 iosize = r._io.size
3962 iosize = r._io.size
3955 buffer = util.buffer
3963 buffer = util.buffer
3956
3964
3957 chunks = []
3965 chunks = []
3958 ladd = chunks.append
3966 ladd = chunks.append
3959 for idx, item in enumerate(chain):
3967 for idx, item in enumerate(chain):
3960 offset = start(item[0])
3968 offset = start(item[0])
3961 bits = data[idx]
3969 bits = data[idx]
3962 for rev in item:
3970 for rev in item:
3963 chunkstart = start(rev)
3971 chunkstart = start(rev)
3964 if inline:
3972 if inline:
3965 chunkstart += (rev + 1) * iosize
3973 chunkstart += (rev + 1) * iosize
3966 chunklength = length(rev)
3974 chunklength = length(rev)
3967 ladd(buffer(bits, chunkstart - offset, chunklength))
3975 ladd(buffer(bits, chunkstart - offset, chunklength))
3968
3976
3969 return chunks
3977 return chunks
3970
3978
3971 def dodeltachain(rev):
3979 def dodeltachain(rev):
3972 if not cache:
3980 if not cache:
3973 r.clearcaches()
3981 r.clearcaches()
3974 r._deltachain(rev)
3982 r._deltachain(rev)
3975
3983
3976 def doread(chain):
3984 def doread(chain):
3977 if not cache:
3985 if not cache:
3978 r.clearcaches()
3986 r.clearcaches()
3979 for item in slicedchain:
3987 for item in slicedchain:
3980 with lazy_reading(r):
3988 with lazy_reading(r):
3981 segmentforrevs(item[0], item[-1])
3989 segmentforrevs(item[0], item[-1])
3982
3990
3983 def doslice(r, chain, size):
3991 def doslice(r, chain, size):
3984 for s in slicechunk(r, chain, targetsize=size):
3992 for s in slicechunk(r, chain, targetsize=size):
3985 pass
3993 pass
3986
3994
3987 def dorawchunks(data, chain):
3995 def dorawchunks(data, chain):
3988 if not cache:
3996 if not cache:
3989 r.clearcaches()
3997 r.clearcaches()
3990 getrawchunks(data, chain)
3998 getrawchunks(data, chain)
3991
3999
3992 def dodecompress(chunks):
4000 def dodecompress(chunks):
3993 decomp = r.decompress
4001 decomp = r.decompress
3994 for chunk in chunks:
4002 for chunk in chunks:
3995 decomp(chunk)
4003 decomp(chunk)
3996
4004
3997 def dopatch(text, bins):
4005 def dopatch(text, bins):
3998 if not cache:
4006 if not cache:
3999 r.clearcaches()
4007 r.clearcaches()
4000 mdiff.patches(text, bins)
4008 mdiff.patches(text, bins)
4001
4009
4002 def dohash(text):
4010 def dohash(text):
4003 if not cache:
4011 if not cache:
4004 r.clearcaches()
4012 r.clearcaches()
4005 r.checkhash(text, node, rev=rev)
4013 r.checkhash(text, node, rev=rev)
4006
4014
4007 def dorevision():
4015 def dorevision():
4008 if not cache:
4016 if not cache:
4009 r.clearcaches()
4017 r.clearcaches()
4010 r.revision(node)
4018 r.revision(node)
4011
4019
4012 try:
4020 try:
4013 from mercurial.revlogutils.deltas import slicechunk
4021 from mercurial.revlogutils.deltas import slicechunk
4014 except ImportError:
4022 except ImportError:
4015 slicechunk = getattr(revlog, '_slicechunk', None)
4023 slicechunk = getattr(revlog, '_slicechunk', None)
4016
4024
4017 size = r.length(rev)
4025 size = r.length(rev)
4018 chain = r._deltachain(rev)[0]
4026 chain = r._deltachain(rev)[0]
4019
4027
4020 with_sparse_read = False
4028 with_sparse_read = False
4021 if hasattr(r, 'data_config'):
4029 if hasattr(r, 'data_config'):
4022 with_sparse_read = r.data_config.with_sparse_read
4030 with_sparse_read = r.data_config.with_sparse_read
4023 elif hasattr(r, '_withsparseread'):
4031 elif hasattr(r, '_withsparseread'):
4024 with_sparse_read = r._withsparseread
4032 with_sparse_read = r._withsparseread
4025 if with_sparse_read:
4033 if with_sparse_read:
4026 slicedchain = (chain,)
4034 slicedchain = (chain,)
4027 else:
4035 else:
4028 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4036 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4029 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4037 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4030 rawchunks = getrawchunks(data, slicedchain)
4038 rawchunks = getrawchunks(data, slicedchain)
4031 bins = r._inner._chunks(chain)
4039 bins = r._inner._chunks(chain)
4032 text = bytes(bins[0])
4040 text = bytes(bins[0])
4033 bins = bins[1:]
4041 bins = bins[1:]
4034 text = mdiff.patches(text, bins)
4042 text = mdiff.patches(text, bins)
4035
4043
4036 benches = [
4044 benches = [
4037 (lambda: dorevision(), b'full'),
4045 (lambda: dorevision(), b'full'),
4038 (lambda: dodeltachain(rev), b'deltachain'),
4046 (lambda: dodeltachain(rev), b'deltachain'),
4039 (lambda: doread(chain), b'read'),
4047 (lambda: doread(chain), b'read'),
4040 ]
4048 ]
4041
4049
4042 if with_sparse_read:
4050 if with_sparse_read:
4043 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4051 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4044 benches.append(slicing)
4052 benches.append(slicing)
4045
4053
4046 benches.extend(
4054 benches.extend(
4047 [
4055 [
4048 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4056 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4049 (lambda: dodecompress(rawchunks), b'decompress'),
4057 (lambda: dodecompress(rawchunks), b'decompress'),
4050 (lambda: dopatch(text, bins), b'patch'),
4058 (lambda: dopatch(text, bins), b'patch'),
4051 (lambda: dohash(text), b'hash'),
4059 (lambda: dohash(text), b'hash'),
4052 ]
4060 ]
4053 )
4061 )
4054
4062
4055 timer, fm = gettimer(ui, opts)
4063 timer, fm = gettimer(ui, opts)
4056 for fn, title in benches:
4064 for fn, title in benches:
4057 timer(fn, title=title)
4065 timer(fn, title=title)
4058 fm.end()
4066 fm.end()
4059
4067
4060
4068
4061 @command(
4069 @command(
4062 b'perf::revset|perfrevset',
4070 b'perf::revset|perfrevset',
4063 [
4071 [
4064 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4072 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4065 (b'', b'contexts', False, b'obtain changectx for each revision'),
4073 (b'', b'contexts', False, b'obtain changectx for each revision'),
4066 ]
4074 ]
4067 + formatteropts,
4075 + formatteropts,
4068 b"REVSET",
4076 b"REVSET",
4069 )
4077 )
4070 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4078 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4071 """benchmark the execution time of a revset
4079 """benchmark the execution time of a revset
4072
4080
4073 Use the --clean option if need to evaluate the impact of build volatile
4081 Use the --clean option if need to evaluate the impact of build volatile
4074 revisions set cache on the revset execution. Volatile cache hold filtered
4082 revisions set cache on the revset execution. Volatile cache hold filtered
4075 and obsolete related cache."""
4083 and obsolete related cache."""
4076 opts = _byteskwargs(opts)
4084 opts = _byteskwargs(opts)
4077
4085
4078 timer, fm = gettimer(ui, opts)
4086 timer, fm = gettimer(ui, opts)
4079
4087
4080 def d():
4088 def d():
4081 if clear:
4089 if clear:
4082 repo.invalidatevolatilesets()
4090 repo.invalidatevolatilesets()
4083 if contexts:
4091 if contexts:
4084 for ctx in repo.set(expr):
4092 for ctx in repo.set(expr):
4085 pass
4093 pass
4086 else:
4094 else:
4087 for r in repo.revs(expr):
4095 for r in repo.revs(expr):
4088 pass
4096 pass
4089
4097
4090 timer(d)
4098 timer(d)
4091 fm.end()
4099 fm.end()
4092
4100
4093
4101
4094 @command(
4102 @command(
4095 b'perf::volatilesets|perfvolatilesets',
4103 b'perf::volatilesets|perfvolatilesets',
4096 [
4104 [
4097 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4105 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4098 ]
4106 ]
4099 + formatteropts,
4107 + formatteropts,
4100 )
4108 )
4101 def perfvolatilesets(ui, repo, *names, **opts):
4109 def perfvolatilesets(ui, repo, *names, **opts):
4102 """benchmark the computation of various volatile set
4110 """benchmark the computation of various volatile set
4103
4111
4104 Volatile set computes element related to filtering and obsolescence."""
4112 Volatile set computes element related to filtering and obsolescence."""
4105 opts = _byteskwargs(opts)
4113 opts = _byteskwargs(opts)
4106 timer, fm = gettimer(ui, opts)
4114 timer, fm = gettimer(ui, opts)
4107 repo = repo.unfiltered()
4115 repo = repo.unfiltered()
4108
4116
4109 def getobs(name):
4117 def getobs(name):
4110 def d():
4118 def d():
4111 repo.invalidatevolatilesets()
4119 repo.invalidatevolatilesets()
4112 if opts[b'clear_obsstore']:
4120 if opts[b'clear_obsstore']:
4113 clearfilecache(repo, b'obsstore')
4121 clearfilecache(repo, b'obsstore')
4114 obsolete.getrevs(repo, name)
4122 obsolete.getrevs(repo, name)
4115
4123
4116 return d
4124 return d
4117
4125
4118 allobs = sorted(obsolete.cachefuncs)
4126 allobs = sorted(obsolete.cachefuncs)
4119 if names:
4127 if names:
4120 allobs = [n for n in allobs if n in names]
4128 allobs = [n for n in allobs if n in names]
4121
4129
4122 for name in allobs:
4130 for name in allobs:
4123 timer(getobs(name), title=name)
4131 timer(getobs(name), title=name)
4124
4132
4125 def getfiltered(name):
4133 def getfiltered(name):
4126 def d():
4134 def d():
4127 repo.invalidatevolatilesets()
4135 repo.invalidatevolatilesets()
4128 if opts[b'clear_obsstore']:
4136 if opts[b'clear_obsstore']:
4129 clearfilecache(repo, b'obsstore')
4137 clearfilecache(repo, b'obsstore')
4130 repoview.filterrevs(repo, name)
4138 repoview.filterrevs(repo, name)
4131
4139
4132 return d
4140 return d
4133
4141
4134 allfilter = sorted(repoview.filtertable)
4142 allfilter = sorted(repoview.filtertable)
4135 if names:
4143 if names:
4136 allfilter = [n for n in allfilter if n in names]
4144 allfilter = [n for n in allfilter if n in names]
4137
4145
4138 for name in allfilter:
4146 for name in allfilter:
4139 timer(getfiltered(name), title=name)
4147 timer(getfiltered(name), title=name)
4140 fm.end()
4148 fm.end()
4141
4149
4142
4150
4143 @command(
4151 @command(
4144 b'perf::branchmap|perfbranchmap',
4152 b'perf::branchmap|perfbranchmap',
4145 [
4153 [
4146 (b'f', b'full', False, b'Includes build time of subset'),
4154 (b'f', b'full', False, b'Includes build time of subset'),
4147 (
4155 (
4148 b'',
4156 b'',
4149 b'clear-revbranch',
4157 b'clear-revbranch',
4150 False,
4158 False,
4151 b'purge the revbranch cache between computation',
4159 b'purge the revbranch cache between computation',
4152 ),
4160 ),
4153 ]
4161 ]
4154 + formatteropts,
4162 + formatteropts,
4155 )
4163 )
4156 def perfbranchmap(ui, repo, *filternames, **opts):
4164 def perfbranchmap(ui, repo, *filternames, **opts):
4157 """benchmark the update of a branchmap
4165 """benchmark the update of a branchmap
4158
4166
4159 This benchmarks the full repo.branchmap() call with read and write disabled
4167 This benchmarks the full repo.branchmap() call with read and write disabled
4160 """
4168 """
4161 opts = _byteskwargs(opts)
4169 opts = _byteskwargs(opts)
4162 full = opts.get(b"full", False)
4170 full = opts.get(b"full", False)
4163 clear_revbranch = opts.get(b"clear_revbranch", False)
4171 clear_revbranch = opts.get(b"clear_revbranch", False)
4164 timer, fm = gettimer(ui, opts)
4172 timer, fm = gettimer(ui, opts)
4165
4173
4166 def getbranchmap(filtername):
4174 def getbranchmap(filtername):
4167 """generate a benchmark function for the filtername"""
4175 """generate a benchmark function for the filtername"""
4168 if filtername is None:
4176 if filtername is None:
4169 view = repo
4177 view = repo
4170 else:
4178 else:
4171 view = repo.filtered(filtername)
4179 view = repo.filtered(filtername)
4172 if util.safehasattr(view._branchcaches, '_per_filter'):
4180 if util.safehasattr(view._branchcaches, '_per_filter'):
4173 filtered = view._branchcaches._per_filter
4181 filtered = view._branchcaches._per_filter
4174 else:
4182 else:
4175 # older versions
4183 # older versions
4176 filtered = view._branchcaches
4184 filtered = view._branchcaches
4177
4185
4178 def d():
4186 def d():
4179 if clear_revbranch:
4187 if clear_revbranch:
4180 repo.revbranchcache()._clear()
4188 repo.revbranchcache()._clear()
4181 if full:
4189 if full:
4182 view._branchcaches.clear()
4190 view._branchcaches.clear()
4183 else:
4191 else:
4184 filtered.pop(filtername, None)
4192 filtered.pop(filtername, None)
4185 view.branchmap()
4193 view.branchmap()
4186
4194
4187 return d
4195 return d
4188
4196
4189 # add filter in smaller subset to bigger subset
4197 # add filter in smaller subset to bigger subset
4190 possiblefilters = set(repoview.filtertable)
4198 possiblefilters = set(repoview.filtertable)
4191 if filternames:
4199 if filternames:
4192 possiblefilters &= set(filternames)
4200 possiblefilters &= set(filternames)
4193 subsettable = getbranchmapsubsettable()
4201 subsettable = getbranchmapsubsettable()
4194 allfilters = []
4202 allfilters = []
4195 while possiblefilters:
4203 while possiblefilters:
4196 for name in possiblefilters:
4204 for name in possiblefilters:
4197 subset = subsettable.get(name)
4205 subset = subsettable.get(name)
4198 if subset not in possiblefilters:
4206 if subset not in possiblefilters:
4199 break
4207 break
4200 else:
4208 else:
4201 assert False, b'subset cycle %s!' % possiblefilters
4209 assert False, b'subset cycle %s!' % possiblefilters
4202 allfilters.append(name)
4210 allfilters.append(name)
4203 possiblefilters.remove(name)
4211 possiblefilters.remove(name)
4204
4212
4205 # warm the cache
4213 # warm the cache
4206 if not full:
4214 if not full:
4207 for name in allfilters:
4215 for name in allfilters:
4208 repo.filtered(name).branchmap()
4216 repo.filtered(name).branchmap()
4209 if not filternames or b'unfiltered' in filternames:
4217 if not filternames or b'unfiltered' in filternames:
4210 # add unfiltered
4218 # add unfiltered
4211 allfilters.append(None)
4219 allfilters.append(None)
4212
4220
4213 old_branch_cache_from_file = None
4221 old_branch_cache_from_file = None
4214 branchcacheread = None
4222 branchcacheread = None
4215 if util.safehasattr(branchmap, 'branch_cache_from_file'):
4223 if util.safehasattr(branchmap, 'branch_cache_from_file'):
4216 old_branch_cache_from_file = branchmap.branch_cache_from_file
4224 old_branch_cache_from_file = branchmap.branch_cache_from_file
4217 branchmap.branch_cache_from_file = lambda *args: None
4225 branchmap.branch_cache_from_file = lambda *args: None
4218 elif util.safehasattr(branchmap.branchcache, 'fromfile'):
4226 elif util.safehasattr(branchmap.branchcache, 'fromfile'):
4219 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4227 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4220 branchcacheread.set(classmethod(lambda *args: None))
4228 branchcacheread.set(classmethod(lambda *args: None))
4221 else:
4229 else:
4222 # older versions
4230 # older versions
4223 branchcacheread = safeattrsetter(branchmap, b'read')
4231 branchcacheread = safeattrsetter(branchmap, b'read')
4224 branchcacheread.set(lambda *args: None)
4232 branchcacheread.set(lambda *args: None)
4225 if util.safehasattr(branchmap, '_LocalBranchCache'):
4233 if util.safehasattr(branchmap, '_LocalBranchCache'):
4226 branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
4234 branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
4227 branchcachewrite.set(lambda *args: None)
4235 branchcachewrite.set(lambda *args: None)
4228 else:
4236 else:
4229 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4237 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4230 branchcachewrite.set(lambda *args: None)
4238 branchcachewrite.set(lambda *args: None)
4231 try:
4239 try:
4232 for name in allfilters:
4240 for name in allfilters:
4233 printname = name
4241 printname = name
4234 if name is None:
4242 if name is None:
4235 printname = b'unfiltered'
4243 printname = b'unfiltered'
4236 timer(getbranchmap(name), title=printname)
4244 timer(getbranchmap(name), title=printname)
4237 finally:
4245 finally:
4238 if old_branch_cache_from_file is not None:
4246 if old_branch_cache_from_file is not None:
4239 branchmap.branch_cache_from_file = old_branch_cache_from_file
4247 branchmap.branch_cache_from_file = old_branch_cache_from_file
4240 if branchcacheread is not None:
4248 if branchcacheread is not None:
4241 branchcacheread.restore()
4249 branchcacheread.restore()
4242 branchcachewrite.restore()
4250 branchcachewrite.restore()
4243 fm.end()
4251 fm.end()
4244
4252
4245
4253
4246 @command(
4254 @command(
4247 b'perf::branchmapupdate|perfbranchmapupdate',
4255 b'perf::branchmapupdate|perfbranchmapupdate',
4248 [
4256 [
4249 (b'', b'base', [], b'subset of revision to start from'),
4257 (b'', b'base', [], b'subset of revision to start from'),
4250 (b'', b'target', [], b'subset of revision to end with'),
4258 (b'', b'target', [], b'subset of revision to end with'),
4251 (b'', b'clear-caches', False, b'clear cache between each runs'),
4259 (b'', b'clear-caches', False, b'clear cache between each runs'),
4252 ]
4260 ]
4253 + formatteropts,
4261 + formatteropts,
4254 )
4262 )
4255 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4263 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4256 """benchmark branchmap update from for <base> revs to <target> revs
4264 """benchmark branchmap update from for <base> revs to <target> revs
4257
4265
4258 If `--clear-caches` is passed, the following items will be reset before
4266 If `--clear-caches` is passed, the following items will be reset before
4259 each update:
4267 each update:
4260 * the changelog instance and associated indexes
4268 * the changelog instance and associated indexes
4261 * the rev-branch-cache instance
4269 * the rev-branch-cache instance
4262
4270
4263 Examples:
4271 Examples:
4264
4272
4265 # update for the one last revision
4273 # update for the one last revision
4266 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4274 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4267
4275
4268 $ update for change coming with a new branch
4276 $ update for change coming with a new branch
4269 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4277 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4270 """
4278 """
4271 from mercurial import branchmap
4279 from mercurial import branchmap
4272 from mercurial import repoview
4280 from mercurial import repoview
4273
4281
4274 opts = _byteskwargs(opts)
4282 opts = _byteskwargs(opts)
4275 timer, fm = gettimer(ui, opts)
4283 timer, fm = gettimer(ui, opts)
4276 clearcaches = opts[b'clear_caches']
4284 clearcaches = opts[b'clear_caches']
4277 unfi = repo.unfiltered()
4285 unfi = repo.unfiltered()
4278 x = [None] # used to pass data between closure
4286 x = [None] # used to pass data between closure
4279
4287
4280 # we use a `list` here to avoid possible side effect from smartset
4288 # we use a `list` here to avoid possible side effect from smartset
4281 baserevs = list(scmutil.revrange(repo, base))
4289 baserevs = list(scmutil.revrange(repo, base))
4282 targetrevs = list(scmutil.revrange(repo, target))
4290 targetrevs = list(scmutil.revrange(repo, target))
4283 if not baserevs:
4291 if not baserevs:
4284 raise error.Abort(b'no revisions selected for --base')
4292 raise error.Abort(b'no revisions selected for --base')
4285 if not targetrevs:
4293 if not targetrevs:
4286 raise error.Abort(b'no revisions selected for --target')
4294 raise error.Abort(b'no revisions selected for --target')
4287
4295
4288 # make sure the target branchmap also contains the one in the base
4296 # make sure the target branchmap also contains the one in the base
4289 targetrevs = list(set(baserevs) | set(targetrevs))
4297 targetrevs = list(set(baserevs) | set(targetrevs))
4290 targetrevs.sort()
4298 targetrevs.sort()
4291
4299
4292 cl = repo.changelog
4300 cl = repo.changelog
4293 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4301 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4294 allbaserevs.sort()
4302 allbaserevs.sort()
4295 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4303 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4296
4304
4297 newrevs = list(alltargetrevs.difference(allbaserevs))
4305 newrevs = list(alltargetrevs.difference(allbaserevs))
4298 newrevs.sort()
4306 newrevs.sort()
4299
4307
4300 allrevs = frozenset(unfi.changelog.revs())
4308 allrevs = frozenset(unfi.changelog.revs())
4301 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4309 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4302 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4310 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4303
4311
4304 def basefilter(repo, visibilityexceptions=None):
4312 def basefilter(repo, visibilityexceptions=None):
4305 return basefilterrevs
4313 return basefilterrevs
4306
4314
4307 def targetfilter(repo, visibilityexceptions=None):
4315 def targetfilter(repo, visibilityexceptions=None):
4308 return targetfilterrevs
4316 return targetfilterrevs
4309
4317
4310 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4318 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4311 ui.status(msg % (len(allbaserevs), len(newrevs)))
4319 ui.status(msg % (len(allbaserevs), len(newrevs)))
4312 if targetfilterrevs:
4320 if targetfilterrevs:
4313 msg = b'(%d revisions still filtered)\n'
4321 msg = b'(%d revisions still filtered)\n'
4314 ui.status(msg % len(targetfilterrevs))
4322 ui.status(msg % len(targetfilterrevs))
4315
4323
4316 try:
4324 try:
4317 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4325 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4318 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4326 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4319
4327
4320 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4328 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4321 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4329 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4322
4330
4323 bcache = repo.branchmap()
4331 bcache = repo.branchmap()
4324 copy_method = 'copy'
4332 copy_method = 'copy'
4325
4333
4326 copy_base_kwargs = copy_base_kwargs = {}
4334 copy_base_kwargs = copy_base_kwargs = {}
4327 if hasattr(bcache, 'copy'):
4335 if hasattr(bcache, 'copy'):
4328 if 'repo' in getargspec(bcache.copy).args:
4336 if 'repo' in getargspec(bcache.copy).args:
4329 copy_base_kwargs = {"repo": baserepo}
4337 copy_base_kwargs = {"repo": baserepo}
4330 copy_target_kwargs = {"repo": targetrepo}
4338 copy_target_kwargs = {"repo": targetrepo}
4331 else:
4339 else:
4332 copy_method = 'inherit_for'
4340 copy_method = 'inherit_for'
4333 copy_base_kwargs = {"repo": baserepo}
4341 copy_base_kwargs = {"repo": baserepo}
4334 copy_target_kwargs = {"repo": targetrepo}
4342 copy_target_kwargs = {"repo": targetrepo}
4335
4343
4336 # try to find an existing branchmap to reuse
4344 # try to find an existing branchmap to reuse
4337 subsettable = getbranchmapsubsettable()
4345 subsettable = getbranchmapsubsettable()
4338 candidatefilter = subsettable.get(None)
4346 candidatefilter = subsettable.get(None)
4339 while candidatefilter is not None:
4347 while candidatefilter is not None:
4340 candidatebm = repo.filtered(candidatefilter).branchmap()
4348 candidatebm = repo.filtered(candidatefilter).branchmap()
4341 if candidatebm.validfor(baserepo):
4349 if candidatebm.validfor(baserepo):
4342 filtered = repoview.filterrevs(repo, candidatefilter)
4350 filtered = repoview.filterrevs(repo, candidatefilter)
4343 missing = [r for r in allbaserevs if r in filtered]
4351 missing = [r for r in allbaserevs if r in filtered]
4344 base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
4352 base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
4345 base.update(baserepo, missing)
4353 base.update(baserepo, missing)
4346 break
4354 break
4347 candidatefilter = subsettable.get(candidatefilter)
4355 candidatefilter = subsettable.get(candidatefilter)
4348 else:
4356 else:
4349 # no suitable subset where found
4357 # no suitable subset where found
4350 base = branchmap.branchcache()
4358 base = branchmap.branchcache()
4351 base.update(baserepo, allbaserevs)
4359 base.update(baserepo, allbaserevs)
4352
4360
4353 def setup():
4361 def setup():
4354 x[0] = getattr(base, copy_method)(**copy_target_kwargs)
4362 x[0] = getattr(base, copy_method)(**copy_target_kwargs)
4355 if clearcaches:
4363 if clearcaches:
4356 unfi._revbranchcache = None
4364 unfi._revbranchcache = None
4357 clearchangelog(repo)
4365 clearchangelog(repo)
4358
4366
4359 def bench():
4367 def bench():
4360 x[0].update(targetrepo, newrevs)
4368 x[0].update(targetrepo, newrevs)
4361
4369
4362 timer(bench, setup=setup)
4370 timer(bench, setup=setup)
4363 fm.end()
4371 fm.end()
4364 finally:
4372 finally:
4365 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4373 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4366 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4374 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4367
4375
4368
4376
4369 @command(
4377 @command(
4370 b'perf::branchmapload|perfbranchmapload',
4378 b'perf::branchmapload|perfbranchmapload',
4371 [
4379 [
4372 (b'f', b'filter', b'', b'Specify repoview filter'),
4380 (b'f', b'filter', b'', b'Specify repoview filter'),
4373 (b'', b'list', False, b'List brachmap filter caches'),
4381 (b'', b'list', False, b'List brachmap filter caches'),
4374 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4382 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4375 ]
4383 ]
4376 + formatteropts,
4384 + formatteropts,
4377 )
4385 )
4378 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4386 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4379 """benchmark reading the branchmap"""
4387 """benchmark reading the branchmap"""
4380 opts = _byteskwargs(opts)
4388 opts = _byteskwargs(opts)
4381 clearrevlogs = opts[b'clear_revlogs']
4389 clearrevlogs = opts[b'clear_revlogs']
4382
4390
4383 if list:
4391 if list:
4384 for name, kind, st in repo.cachevfs.readdir(stat=True):
4392 for name, kind, st in repo.cachevfs.readdir(stat=True):
4385 if name.startswith(b'branch2'):
4393 if name.startswith(b'branch2'):
4386 filtername = name.partition(b'-')[2] or b'unfiltered'
4394 filtername = name.partition(b'-')[2] or b'unfiltered'
4387 ui.status(
4395 ui.status(
4388 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4396 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4389 )
4397 )
4390 return
4398 return
4391 if not filter:
4399 if not filter:
4392 filter = None
4400 filter = None
4393 subsettable = getbranchmapsubsettable()
4401 subsettable = getbranchmapsubsettable()
4394 if filter is None:
4402 if filter is None:
4395 repo = repo.unfiltered()
4403 repo = repo.unfiltered()
4396 else:
4404 else:
4397 repo = repoview.repoview(repo, filter)
4405 repo = repoview.repoview(repo, filter)
4398
4406
4399 repo.branchmap() # make sure we have a relevant, up to date branchmap
4407 repo.branchmap() # make sure we have a relevant, up to date branchmap
4400
4408
4401 fromfile = getattr(branchmap, 'branch_cache_from_file', None)
4409 fromfile = getattr(branchmap, 'branch_cache_from_file', None)
4402 if fromfile is None:
4410 if fromfile is None:
4403 fromfile = getattr(branchmap.branchcache, 'fromfile', None)
4411 fromfile = getattr(branchmap.branchcache, 'fromfile', None)
4404 if fromfile is None:
4412 if fromfile is None:
4405 fromfile = branchmap.read
4413 fromfile = branchmap.read
4406
4414
4407 currentfilter = filter
4415 currentfilter = filter
4408 # try once without timer, the filter may not be cached
4416 # try once without timer, the filter may not be cached
4409 while fromfile(repo) is None:
4417 while fromfile(repo) is None:
4410 currentfilter = subsettable.get(currentfilter)
4418 currentfilter = subsettable.get(currentfilter)
4411 if currentfilter is None:
4419 if currentfilter is None:
4412 raise error.Abort(
4420 raise error.Abort(
4413 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4421 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4414 )
4422 )
4415 repo = repo.filtered(currentfilter)
4423 repo = repo.filtered(currentfilter)
4416 timer, fm = gettimer(ui, opts)
4424 timer, fm = gettimer(ui, opts)
4417
4425
4418 def setup():
4426 def setup():
4419 if clearrevlogs:
4427 if clearrevlogs:
4420 clearchangelog(repo)
4428 clearchangelog(repo)
4421
4429
4422 def bench():
4430 def bench():
4423 fromfile(repo)
4431 fromfile(repo)
4424
4432
4425 timer(bench, setup=setup)
4433 timer(bench, setup=setup)
4426 fm.end()
4434 fm.end()
4427
4435
4428
4436
4429 @command(b'perf::loadmarkers|perfloadmarkers')
4437 @command(b'perf::loadmarkers|perfloadmarkers')
4430 def perfloadmarkers(ui, repo):
4438 def perfloadmarkers(ui, repo):
4431 """benchmark the time to parse the on-disk markers for a repo
4439 """benchmark the time to parse the on-disk markers for a repo
4432
4440
4433 Result is the number of markers in the repo."""
4441 Result is the number of markers in the repo."""
4434 timer, fm = gettimer(ui)
4442 timer, fm = gettimer(ui)
4435 svfs = getsvfs(repo)
4443 svfs = getsvfs(repo)
4436 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4444 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4437 fm.end()
4445 fm.end()
4438
4446
4439
4447
4440 @command(
4448 @command(
4441 b'perf::lrucachedict|perflrucachedict',
4449 b'perf::lrucachedict|perflrucachedict',
4442 formatteropts
4450 formatteropts
4443 + [
4451 + [
4444 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4452 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4445 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4453 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4446 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4454 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4447 (b'', b'size', 4, b'size of cache'),
4455 (b'', b'size', 4, b'size of cache'),
4448 (b'', b'gets', 10000, b'number of key lookups'),
4456 (b'', b'gets', 10000, b'number of key lookups'),
4449 (b'', b'sets', 10000, b'number of key sets'),
4457 (b'', b'sets', 10000, b'number of key sets'),
4450 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4458 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4451 (
4459 (
4452 b'',
4460 b'',
4453 b'mixedgetfreq',
4461 b'mixedgetfreq',
4454 50,
4462 50,
4455 b'frequency of get vs set ops in mixed mode',
4463 b'frequency of get vs set ops in mixed mode',
4456 ),
4464 ),
4457 ],
4465 ],
4458 norepo=True,
4466 norepo=True,
4459 )
4467 )
4460 def perflrucache(
4468 def perflrucache(
4461 ui,
4469 ui,
4462 mincost=0,
4470 mincost=0,
4463 maxcost=100,
4471 maxcost=100,
4464 costlimit=0,
4472 costlimit=0,
4465 size=4,
4473 size=4,
4466 gets=10000,
4474 gets=10000,
4467 sets=10000,
4475 sets=10000,
4468 mixed=10000,
4476 mixed=10000,
4469 mixedgetfreq=50,
4477 mixedgetfreq=50,
4470 **opts
4478 **opts
4471 ):
4479 ):
4472 opts = _byteskwargs(opts)
4480 opts = _byteskwargs(opts)
4473
4481
4474 def doinit():
4482 def doinit():
4475 for i in _xrange(10000):
4483 for i in _xrange(10000):
4476 util.lrucachedict(size)
4484 util.lrucachedict(size)
4477
4485
4478 costrange = list(range(mincost, maxcost + 1))
4486 costrange = list(range(mincost, maxcost + 1))
4479
4487
4480 values = []
4488 values = []
4481 for i in _xrange(size):
4489 for i in _xrange(size):
4482 values.append(random.randint(0, _maxint))
4490 values.append(random.randint(0, _maxint))
4483
4491
4484 # Get mode fills the cache and tests raw lookup performance with no
4492 # Get mode fills the cache and tests raw lookup performance with no
4485 # eviction.
4493 # eviction.
4486 getseq = []
4494 getseq = []
4487 for i in _xrange(gets):
4495 for i in _xrange(gets):
4488 getseq.append(random.choice(values))
4496 getseq.append(random.choice(values))
4489
4497
4490 def dogets():
4498 def dogets():
4491 d = util.lrucachedict(size)
4499 d = util.lrucachedict(size)
4492 for v in values:
4500 for v in values:
4493 d[v] = v
4501 d[v] = v
4494 for key in getseq:
4502 for key in getseq:
4495 value = d[key]
4503 value = d[key]
4496 value # silence pyflakes warning
4504 value # silence pyflakes warning
4497
4505
4498 def dogetscost():
4506 def dogetscost():
4499 d = util.lrucachedict(size, maxcost=costlimit)
4507 d = util.lrucachedict(size, maxcost=costlimit)
4500 for i, v in enumerate(values):
4508 for i, v in enumerate(values):
4501 d.insert(v, v, cost=costs[i])
4509 d.insert(v, v, cost=costs[i])
4502 for key in getseq:
4510 for key in getseq:
4503 try:
4511 try:
4504 value = d[key]
4512 value = d[key]
4505 value # silence pyflakes warning
4513 value # silence pyflakes warning
4506 except KeyError:
4514 except KeyError:
4507 pass
4515 pass
4508
4516
4509 # Set mode tests insertion speed with cache eviction.
4517 # Set mode tests insertion speed with cache eviction.
4510 setseq = []
4518 setseq = []
4511 costs = []
4519 costs = []
4512 for i in _xrange(sets):
4520 for i in _xrange(sets):
4513 setseq.append(random.randint(0, _maxint))
4521 setseq.append(random.randint(0, _maxint))
4514 costs.append(random.choice(costrange))
4522 costs.append(random.choice(costrange))
4515
4523
4516 def doinserts():
4524 def doinserts():
4517 d = util.lrucachedict(size)
4525 d = util.lrucachedict(size)
4518 for v in setseq:
4526 for v in setseq:
4519 d.insert(v, v)
4527 d.insert(v, v)
4520
4528
4521 def doinsertscost():
4529 def doinsertscost():
4522 d = util.lrucachedict(size, maxcost=costlimit)
4530 d = util.lrucachedict(size, maxcost=costlimit)
4523 for i, v in enumerate(setseq):
4531 for i, v in enumerate(setseq):
4524 d.insert(v, v, cost=costs[i])
4532 d.insert(v, v, cost=costs[i])
4525
4533
4526 def dosets():
4534 def dosets():
4527 d = util.lrucachedict(size)
4535 d = util.lrucachedict(size)
4528 for v in setseq:
4536 for v in setseq:
4529 d[v] = v
4537 d[v] = v
4530
4538
4531 # Mixed mode randomly performs gets and sets with eviction.
4539 # Mixed mode randomly performs gets and sets with eviction.
4532 mixedops = []
4540 mixedops = []
4533 for i in _xrange(mixed):
4541 for i in _xrange(mixed):
4534 r = random.randint(0, 100)
4542 r = random.randint(0, 100)
4535 if r < mixedgetfreq:
4543 if r < mixedgetfreq:
4536 op = 0
4544 op = 0
4537 else:
4545 else:
4538 op = 1
4546 op = 1
4539
4547
4540 mixedops.append(
4548 mixedops.append(
4541 (op, random.randint(0, size * 2), random.choice(costrange))
4549 (op, random.randint(0, size * 2), random.choice(costrange))
4542 )
4550 )
4543
4551
4544 def domixed():
4552 def domixed():
4545 d = util.lrucachedict(size)
4553 d = util.lrucachedict(size)
4546
4554
4547 for op, v, cost in mixedops:
4555 for op, v, cost in mixedops:
4548 if op == 0:
4556 if op == 0:
4549 try:
4557 try:
4550 d[v]
4558 d[v]
4551 except KeyError:
4559 except KeyError:
4552 pass
4560 pass
4553 else:
4561 else:
4554 d[v] = v
4562 d[v] = v
4555
4563
4556 def domixedcost():
4564 def domixedcost():
4557 d = util.lrucachedict(size, maxcost=costlimit)
4565 d = util.lrucachedict(size, maxcost=costlimit)
4558
4566
4559 for op, v, cost in mixedops:
4567 for op, v, cost in mixedops:
4560 if op == 0:
4568 if op == 0:
4561 try:
4569 try:
4562 d[v]
4570 d[v]
4563 except KeyError:
4571 except KeyError:
4564 pass
4572 pass
4565 else:
4573 else:
4566 d.insert(v, v, cost=cost)
4574 d.insert(v, v, cost=cost)
4567
4575
4568 benches = [
4576 benches = [
4569 (doinit, b'init'),
4577 (doinit, b'init'),
4570 ]
4578 ]
4571
4579
4572 if costlimit:
4580 if costlimit:
4573 benches.extend(
4581 benches.extend(
4574 [
4582 [
4575 (dogetscost, b'gets w/ cost limit'),
4583 (dogetscost, b'gets w/ cost limit'),
4576 (doinsertscost, b'inserts w/ cost limit'),
4584 (doinsertscost, b'inserts w/ cost limit'),
4577 (domixedcost, b'mixed w/ cost limit'),
4585 (domixedcost, b'mixed w/ cost limit'),
4578 ]
4586 ]
4579 )
4587 )
4580 else:
4588 else:
4581 benches.extend(
4589 benches.extend(
4582 [
4590 [
4583 (dogets, b'gets'),
4591 (dogets, b'gets'),
4584 (doinserts, b'inserts'),
4592 (doinserts, b'inserts'),
4585 (dosets, b'sets'),
4593 (dosets, b'sets'),
4586 (domixed, b'mixed'),
4594 (domixed, b'mixed'),
4587 ]
4595 ]
4588 )
4596 )
4589
4597
4590 for fn, title in benches:
4598 for fn, title in benches:
4591 timer, fm = gettimer(ui, opts)
4599 timer, fm = gettimer(ui, opts)
4592 timer(fn, title=title)
4600 timer(fn, title=title)
4593 fm.end()
4601 fm.end()
4594
4602
4595
4603
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)

    Emits ``nlines`` lines of ``nitems`` copies of ``item`` through the
    selected ui method, either one item at a time or (``--batch-line``)
    one whole line per call, optionally flushing after each line.
    """
    opts = _byteskwargs(opts)

    # Resolve the ui method to benchmark (e.g. write, write_err, status).
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # Pre-build the full line once so the timed loop only measures
        # the write call itself.
        line = item * nitems + b'\n'

    def benchmark():
        # Use ``_`` for both loop counters: the indices are unused, and
        # the original nested loops shadowed the same name ``i``.
        for _ in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for _ in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4637
4645
4638
4646
def uisetup(ui):
    """Extension setup hook.

    For "historical portability": on Mercurial 1.9 (a79fea6b3e77) through
    3.7 (5606f7d0d063) — detected by ``cmdutil.openrevlog`` existing while
    ``commands.debugrevlogopts`` does not — wrap ``openrevlog()`` so that
    the ``--dir`` option fails loudly instead of being silently ignored
    (``--dir`` is only honoured since 3.5, 49c583ca48c4).
    """
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_revlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not has_openrevlog or has_revlogopts:
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, _sysstr(b'openrevlog'), openrevlog)
4658
4666
4659
4667
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive one full progress bar from 0 to ``total``, one
        # increment per step; the context manager completes/clears it.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now